From 628d803b95a6aa93e82e6b6e53dce70a7513a522 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Hoguin?= Date: Wed, 27 Jan 2016 16:59:21 +0100 Subject: [PATCH 001/174] Batch writes to the index on requeue --- src/rabbit_variable_queue.erl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index d5b090bed472..45dde112a56c 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -688,12 +688,12 @@ requeue(AckTags, #vqstate { mode = default, State2), MsgCount = length(MsgIds2), {MsgIds2, a(reduce_memory_use( - maybe_update_rates( + maybe_update_rates(ui( State3 #vqstate { delta = Delta1, q3 = Q3a, q4 = Q4a, in_counter = InCounter + MsgCount, - len = Len + MsgCount })))}; + len = Len + MsgCount }))))}; requeue(AckTags, #vqstate { mode = lazy, delta = Delta, q3 = Q3, @@ -706,11 +706,11 @@ requeue(AckTags, #vqstate { mode = lazy, State1), MsgCount = length(MsgIds1), {MsgIds1, a(reduce_memory_use( - maybe_update_rates( + maybe_update_rates(ui( State2 #vqstate { delta = Delta1, q3 = Q3a, in_counter = InCounter + MsgCount, - len = Len + MsgCount })))}. + len = Len + MsgCount }))))}. ackfold(MsgFun, Acc, State, AckTags) -> {AccN, StateN} = @@ -2124,7 +2124,7 @@ publish_alpha(MsgStatus, State) -> {MsgStatus, stats({1, -1}, {MsgStatus, MsgStatus}, State)}. publish_beta(MsgStatus, State) -> - {MsgStatus1, State1} = maybe_write_to_disk(true, false, MsgStatus, State), + {MsgStatus1, State1} = maybe_prepare_write_to_disk(true, false, MsgStatus, State), MsgStatus2 = m(trim_msg_status(MsgStatus1)), {MsgStatus2, stats({1, -1}, {MsgStatus, MsgStatus2}, State1)}. 
@@ -2161,7 +2161,7 @@ delta_merge(SeqIds, Delta, MsgIds, State) -> {#msg_status { msg_id = MsgId } = MsgStatus, State1} = msg_from_pending_ack(SeqId, State0), {_MsgStatus, State2} = - maybe_write_to_disk(true, true, MsgStatus, State1), + maybe_prepare_write_to_disk(true, true, MsgStatus, State1), {expand_delta(SeqId, Delta0), [MsgId | MsgIds0], stats({1, -1}, {MsgStatus, none}, State2)} end, {Delta, MsgIds, State}, SeqIds). From b40dd3708c211f0bea90985a3e7a7a548a576c99 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Mon, 22 Feb 2016 15:02:44 +0000 Subject: [PATCH 002/174] Rabbit broker version check in plugins --- src/rabbit_plugins.erl | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index 2f084ed28a4c..a56c98b3d811 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -149,7 +149,9 @@ list(PluginsDir, IncludeRequiredDeps) -> {AvailablePlugins, Problems} = lists:foldl(fun ({error, EZ, Reason}, {Plugins1, Problems1}) -> {Plugins1, [{EZ, Reason} | Problems1]}; - (Plugin = #plugin{name = Name}, {Plugins1, Problems1}) -> + (Plugin = #plugin{name = Name, + rabbitmq_versions = Versions}, + {Plugins1, Problems1}) -> %% Applications RabbitMQ depends on (eg. %% "rabbit_common") can't be considered %% plugins, otherwise rabbitmq-plugins would @@ -157,7 +159,13 @@ list(PluginsDir, IncludeRequiredDeps) -> %% disable them. case IncludeRequiredDeps orelse not lists:member(Name, RabbitDeps) of - true -> {[Plugin|Plugins1], Problems1}; + true -> + case check_rabbit_version(Versions) of + ok -> + {[Plugin|Plugins1], Problems1}; + {error, Err} -> + {Plugins1, [Err | Problems1]} + end; false -> {Plugins1, Problems1} end end, {[], []}, @@ -171,6 +179,22 @@ list(PluginsDir, IncludeRequiredDeps) -> AvailablePlugins), ensure_dependencies(Plugins). 
+check_rabbit_version([]) -> ok; +check_rabbit_version(Versions) -> + RabbitVersion = case application:get_key(rabbit, vsn) of + undefined -> "0.0.0"; + {ok, Val} -> Val + end, + case lists:any(fun(V) -> + rabbit_misc:version_minor_equivalent(V, RabbitVersion) + andalso + rabbit_misc:version_compare(V, RabbitVersion, lte) + end, + Versions) of + true -> ok; + false -> {error, {version_mismatch, {RabbitVersion, Versions}}} + end. + %% @doc Read the list of enabled plugins from the supplied term file. read_enabled(PluginsFile) -> case rabbit_file:read_term_file(PluginsFile) of @@ -330,8 +354,10 @@ mkplugin(Name, Props, Type, Location) -> Version = proplists:get_value(vsn, Props, "0"), Description = proplists:get_value(description, Props, ""), Dependencies = proplists:get_value(applications, Props, []), + RabbitmqVersions = proplists:get_value(rabbitmq_versions, Props, []), #plugin{name = Name, version = Version, description = Description, - dependencies = Dependencies, location = Location, type = Type}. + dependencies = Dependencies, location = Location, type = Type, + rabbitmq_versions = RabbitmqVersions}. read_app_file(EZ) -> case zip:list_dir(EZ) of From de353f83314c38a2aa64aa6f97b72ed7839b5add Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Mon, 22 Feb 2016 16:07:51 +0000 Subject: [PATCH 003/174] Clean version check function --- src/rabbit_plugins.erl | 61 ++++++++++++++++++++++-------------------- 1 file changed, 32 insertions(+), 29 deletions(-) diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index a56c98b3d811..46abb9860cd6 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -21,6 +21,7 @@ -export([setup/0, active/0, read_enabled/1, list/1, list/2, dependencies/3]). -export([ensure/1]). -export([extract_schemas/1]). +-export([version_support/2]). 
%%---------------------------------------------------------------------------- @@ -147,29 +148,35 @@ list(PluginsDir, IncludeRequiredDeps) -> application:load(rabbit), {ok, RabbitDeps} = application:get_key(rabbit, applications), {AvailablePlugins, Problems} = - lists:foldl(fun ({error, EZ, Reason}, {Plugins1, Problems1}) -> - {Plugins1, [{EZ, Reason} | Problems1]}; - (Plugin = #plugin{name = Name, - rabbitmq_versions = Versions}, - {Plugins1, Problems1}) -> - %% Applications RabbitMQ depends on (eg. - %% "rabbit_common") can't be considered - %% plugins, otherwise rabbitmq-plugins would - %% list them and the user may believe he can - %% disable them. - case IncludeRequiredDeps orelse - not lists:member(Name, RabbitDeps) of - true -> - case check_rabbit_version(Versions) of - ok -> - {[Plugin|Plugins1], Problems1}; - {error, Err} -> - {Plugins1, [Err | Problems1]} - end; - false -> {Plugins1, Problems1} - end - end, {[], []}, - [plugin_info(PluginsDir, Plug) || Plug <- EZs ++ FreeApps]), + lists:foldl( + fun ({error, EZ, Reason}, {Plugins1, Problems1}) -> + {Plugins1, [{EZ, Reason} | Problems1]}; + (Plugin = #plugin{name = Name, + rabbitmq_versions = Versions}, + {Plugins1, Problems1}) -> + %% Applications RabbitMQ depends on (eg. + %% "rabbit_common") can't be considered + %% plugins, otherwise rabbitmq-plugins would + %% list them and the user may believe he can + %% disable them. 
+ case IncludeRequiredDeps orelse + not lists:member(Name, RabbitDeps) of + true -> + RabbitVersion = case application:get_key(rabbit, + vsn) of + undefined -> "0.0.0"; + {ok, Val} -> Val + end, + case version_support(RabbitVersion, Versions) of + ok -> + {[Plugin|Plugins1], Problems1}; + {error, Err} -> + {Plugins1, [Err | Problems1]} + end; + false -> {Plugins1, Problems1} + end + end, {[], []}, + [plugin_info(PluginsDir, Plug) || Plug <- EZs ++ FreeApps]), case Problems of [] -> ok; _ -> rabbit_log:warning( @@ -179,12 +186,8 @@ list(PluginsDir, IncludeRequiredDeps) -> AvailablePlugins), ensure_dependencies(Plugins). -check_rabbit_version([]) -> ok; -check_rabbit_version(Versions) -> - RabbitVersion = case application:get_key(rabbit, vsn) of - undefined -> "0.0.0"; - {ok, Val} -> Val - end, +version_support(_RabbitVersion, []) -> ok; +version_support(RabbitVersion, Versions) -> case lists:any(fun(V) -> rabbit_misc:version_minor_equivalent(V, RabbitVersion) andalso From a2222acd34d5b281e66860e3c88f903d6d8d2e01 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Wed, 23 Mar 2016 09:53:55 +0000 Subject: [PATCH 004/174] plugin versions --- src/rabbit_plugins.erl | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index 46abb9860cd6..c5d08262d238 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -184,7 +184,17 @@ list(PluginsDir, IncludeRequiredDeps) -> end, Plugins = lists:filter(fun(P) -> not plugin_provided_by_otp(P) end, AvailablePlugins), - ensure_dependencies(Plugins). + ensure_plugins_versions(ensure_dependencies(Plugins)). 
+ +ensure_plugins_versions(Plugins) -> + PluginsVersions = [{Name, Vsn} + || #plugin{name = Name, version = Vsn} <- Plugins], + lists:foldl( + fun(Plugin = #plugin{name = Name, plugins_versions = PluginsVersions}, + {Plugins1, Problems1}) -> + + + ) version_support(_RabbitVersion, []) -> ok; version_support(RabbitVersion, Versions) -> @@ -358,9 +368,11 @@ mkplugin(Name, Props, Type, Location) -> Description = proplists:get_value(description, Props, ""), Dependencies = proplists:get_value(applications, Props, []), RabbitmqVersions = proplists:get_value(rabbitmq_versions, Props, []), + PluginsVersions = proplists:get_value(plugins_versions, Props, []), #plugin{name = Name, version = Version, description = Description, dependencies = Dependencies, location = Location, type = Type, - rabbitmq_versions = RabbitmqVersions}. + rabbitmq_versions = RabbitmqVersions, + plugins_versions = PluginsVersions}. read_app_file(EZ) -> case zip:list_dir(EZ) of From 03a7fdeb073b2aa22d70c45d586c747bb240c4c8 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Wed, 23 Mar 2016 14:32:45 +0000 Subject: [PATCH 005/174] Plugin versions check --- src/rabbit_plugins.erl | 50 +++++++++++++++++++++++++++++++++--------- 1 file changed, 40 insertions(+), 10 deletions(-) diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index c5d08262d238..81830da6a835 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -162,15 +162,15 @@ list(PluginsDir, IncludeRequiredDeps) -> case IncludeRequiredDeps orelse not lists:member(Name, RabbitDeps) of true -> - RabbitVersion = case application:get_key(rabbit, + RabbitVersion = case application:get_key(rabbit, vsn) of undefined -> "0.0.0"; {ok, Val} -> Val end, case version_support(RabbitVersion, Versions) of - ok -> + ok -> {[Plugin|Plugins1], Problems1}; - {error, Err} -> + {error, Err} -> {Plugins1, [Err | Problems1]} end; false -> {Plugins1, Problems1} @@ -187,20 +187,50 @@ list(PluginsDir, IncludeRequiredDeps) -> 
ensure_plugins_versions(ensure_dependencies(Plugins)). ensure_plugins_versions(Plugins) -> - PluginsVersions = [{Name, Vsn} + ExistingVersions = [{Name, Vsn} || #plugin{name = Name, version = Vsn} <- Plugins], - lists:foldl( - fun(Plugin = #plugin{name = Name, plugins_versions = PluginsVersions}, + {GoodPlugins, Problems} = lists:foldl( + fun(Plugin = #plugin{name = Name, plugins_versions = DepsVersions}, {Plugins1, Problems1}) -> - + case check_plugins_versions(ExistingVersions, DepsVersions) of + ok -> {[Plugin | Plugins1], Problems1}; + {error, Err} -> {Plugins1, [{Name, Err} | Problems1]} + end + end, + {[],[]}, + Plugins), + case Problems of + [] -> ok; + _ -> rabbit_log:warning("Some plugin veriosns do not match: ~p~n", + [Problems]) + end, + GoodPlugins. + +check_plugins_versions(ExistingVersions, RequiredVersions) -> + Problems = lists:foldl( + fun({Name, Versions}, Acc) -> + case proplists:get_value(Name, ExistingVersions) of + undefined -> [{missing_dependency, Name} | Acc]; + Version -> + case version_support(Version, Versions) of + {error, Err} -> [{Err, Name} | Acc]; + ok -> Acc + end + end + end, + [], + RequiredVersions), + case Problems of + [] -> ok; + _ -> {error, Problems} + end. - ) version_support(_RabbitVersion, []) -> ok; version_support(RabbitVersion, Versions) -> case lists:any(fun(V) -> rabbit_misc:version_minor_equivalent(V, RabbitVersion) - andalso + andalso rabbit_misc:version_compare(V, RabbitVersion, lte) end, Versions) of @@ -371,7 +401,7 @@ mkplugin(Name, Props, Type, Location) -> PluginsVersions = proplists:get_value(plugins_versions, Props, []), #plugin{name = Name, version = Version, description = Description, dependencies = Dependencies, location = Location, type = Type, - rabbitmq_versions = RabbitmqVersions, + rabbitmq_versions = RabbitmqVersions, plugins_versions = PluginsVersions}. 
read_app_file(EZ) -> From d3676b51bb04ec2ea14076ff8f70368f0d586b6d Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Thu, 24 Mar 2016 14:01:33 +0000 Subject: [PATCH 006/174] Check deps versions before missing dependencies --- src/rabbit_plugins.erl | 65 +++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 39 deletions(-) diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index 81830da6a835..cf14a2efe22d 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -24,7 +24,7 @@ -export([version_support/2]). %%---------------------------------------------------------------------------- - +-compile(export_all). -ifdef(use_specs). -type(plugin_name() :: atom()). @@ -98,14 +98,14 @@ extract_schemas(SchemaDir) -> ok. extract_schema(#plugin{type = ez, location = Location}, SchemaDir) -> - {ok, Files} = zip:extract(Location, - [memory, {file_filter, - fun(#zip_file{name = Name}) -> - string:str(Name, "priv/schema") > 0 + {ok, Files} = zip:extract(Location, + [memory, {file_filter, + fun(#zip_file{name = Name}) -> + string:str(Name, "priv/schema") > 0 end}]), lists:foreach( fun({FileName, Content}) -> - ok = file:write_file(filename:join([SchemaDir, + ok = file:write_file(filename:join([SchemaDir, filename:basename(FileName)]), Content) end, @@ -113,16 +113,16 @@ extract_schema(#plugin{type = ez, location = Location}, SchemaDir) -> ok; extract_schema(#plugin{type = dir, location = Location}, SchemaDir) -> PluginSchema = filename:join([Location, - "priv", - "schema"]), + "priv", + "schema"]), case rabbit_file:is_dir(PluginSchema) of false -> ok; - true -> - PluginSchemaFiles = + true -> + PluginSchemaFiles = [ filename:join(PluginSchema, FileName) - || FileName <- rabbit_file:wildcard(".*\\.schema", + || FileName <- rabbit_file:wildcard(".*\\.schema", PluginSchema) ], - [ file:copy(SchemaFile, SchemaDir) + [ file:copy(SchemaFile, SchemaDir) || SchemaFile <- PluginSchemaFiles ] end. 
@@ -147,12 +147,14 @@ list(PluginsDir, IncludeRequiredDeps) -> %% instance. application:load(rabbit), {ok, RabbitDeps} = application:get_key(rabbit, applications), + AllPlugins = [plugin_info(PluginsDir, Plug) || Plug <- EZs ++ FreeApps], {AvailablePlugins, Problems} = lists:foldl( fun ({error, EZ, Reason}, {Plugins1, Problems1}) -> {Plugins1, [{EZ, Reason} | Problems1]}; (Plugin = #plugin{name = Name, - rabbitmq_versions = Versions}, + rabbitmq_versions = Versions, + plugins_versions = PluginsVersions}, {Plugins1, Problems1}) -> %% Applications RabbitMQ depends on (eg. %% "rabbit_common") can't be considered @@ -167,16 +169,19 @@ list(PluginsDir, IncludeRequiredDeps) -> undefined -> "0.0.0"; {ok, Val} -> Val end, - case version_support(RabbitVersion, Versions) of - ok -> + RabbitVersionValid = version_support(RabbitVersion, Versions), + DepsVersionsValid = check_plugins_versions(AllPlugins, PluginsVersions), + case [RabbitVersionValid, DepsVersionsValid] of + [ok, ok] -> {[Plugin|Plugins1], Problems1}; - {error, Err} -> - {Plugins1, [Err | Problems1]} + Errs -> + Errors = [Err || Err <- Errs, Err =/= ok], + {Plugins1, [{Name, Errors} | Problems1]} end; false -> {Plugins1, Problems1} end end, {[], []}, - [plugin_info(PluginsDir, Plug) || Plug <- EZs ++ FreeApps]), + AllPlugins), case Problems of [] -> ok; _ -> rabbit_log:warning( @@ -184,29 +189,11 @@ list(PluginsDir, IncludeRequiredDeps) -> end, Plugins = lists:filter(fun(P) -> not plugin_provided_by_otp(P) end, AvailablePlugins), - ensure_plugins_versions(ensure_dependencies(Plugins)). + ensure_dependencies(Plugins). 
-ensure_plugins_versions(Plugins) -> +check_plugins_versions(AllPlugins, RequiredVersions) -> ExistingVersions = [{Name, Vsn} - || #plugin{name = Name, version = Vsn} <- Plugins], - {GoodPlugins, Problems} = lists:foldl( - fun(Plugin = #plugin{name = Name, plugins_versions = DepsVersions}, - {Plugins1, Problems1}) -> - case check_plugins_versions(ExistingVersions, DepsVersions) of - ok -> {[Plugin | Plugins1], Problems1}; - {error, Err} -> {Plugins1, [{Name, Err} | Problems1]} - end - end, - {[],[]}, - Plugins), - case Problems of - [] -> ok; - _ -> rabbit_log:warning("Some plugin veriosns do not match: ~p~n", - [Problems]) - end, - GoodPlugins. - -check_plugins_versions(ExistingVersions, RequiredVersions) -> + || #plugin{name = Name, version = Vsn} <- AllPlugins], Problems = lists:foldl( fun({Name, Versions}, Acc) -> case proplists:get_value(Name, ExistingVersions) of From 0feb95d76c18a9a8ba67acaf4f638fd4524abf32 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Fri, 18 Mar 2016 16:26:04 +0000 Subject: [PATCH 007/174] Support extensions in config files --- scripts/rabbitmq-env.bat | 8 ++++++- scripts/rabbitmq-server | 28 +++++++++++++++++++---- scripts/rabbitmq-server.bat | 38 +++++++++++++++++++++++-------- scripts/rabbitmq-service.bat | 37 +++++++++++++++++++++++++----- src/rabbit_prelaunch.erl | 44 ++++++++++++++++++++---------------- 5 files changed, 115 insertions(+), 40 deletions(-) diff --git a/scripts/rabbitmq-env.bat b/scripts/rabbitmq-env.bat index 430395accf1c..c29ef5d5cccc 100644 --- a/scripts/rabbitmq-env.bat +++ b/scripts/rabbitmq-env.bat @@ -77,7 +77,7 @@ if "!RABBITMQ_NODENAME!"=="" ( if "!NODENAME!"=="" ( REM We use Erlang to query the local hostname because REM !COMPUTERNAME! and Erlang may return different results. - REM Start erl with -sname to make sure epmd is started. + REM Start erl with -sname to make sure epmd is started. call "%ERLANG_HOME%\bin\erl.exe" -A0 -noinput -boot start_clean -sname rabbit-prelaunch-epmd -eval "init:stop()." 
>nul 2>&1 for /f "delims=" %%F in ('call "%ERLANG_HOME%\bin\erl.exe" -A0 -noinput -boot start_clean -eval "net_kernel:start([list_to_atom(""rabbit-gethostname-"" ++ os:getpid()), %NAMETYPE%]), [_, H] = string:tokens(atom_to_list(node()), ""@""), io:format(""~s~n"", [H]), init:stop()."') do @set HOSTNAME=%%F set RABBITMQ_NODENAME=rabbit@!HOSTNAME! @@ -150,6 +150,8 @@ REM [ "x" = "x$RABBITMQ_SERVER_ERL_ARGS" ] && RABBITMQ_SERVER_ERL_ARGS=${SERVER_ REM No Windows equivalent REM [ "x" = "x$RABBITMQ_CONFIG_FILE" ] && RABBITMQ_CONFIG_FILE=${CONFIG_FILE} + +CALL :unquote RABBITMQ_CONFIG_FILE %RABBITMQ_CONFIG_FILE% if "!RABBITMQ_CONFIG_FILE!"=="" ( if "!CONFIG_FILE!"=="" ( set RABBITMQ_CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq @@ -417,3 +419,7 @@ REM ##--- End of overridden variables REM REM # Since we source this elsewhere, don't accidentally stop execution REM true + +:unquote +set %1=%~2 +EXIT /B 0 diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server index 0e3f06baf981..af74e0115ec5 100755 --- a/scripts/rabbitmq-server +++ b/scripts/rabbitmq-server @@ -95,10 +95,15 @@ fi set -e -if [ -f "${RABBITMQ_CONFIG_FILE}.config" ]; then - RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE}" -elif [ -f "${RABBITMQ_CONFIG_FILE}.conf" ]; then - RABBITMQ_CONFIG_ARG="-conf ${RABBITMQ_CONFIG_FILE} \ +RABBITMQ_CONFIG_FILE_NOEX="${RABBITMQ_CONFIG_FILE%.*}" +echo "NOEX: ${RABBITMQ_CONFIG_FILE_NOEX}" + +if [ "${RABBITMQ_CONFIG_FILE_NOEX}.config" == "${RABBITMQ_CONFIG_FILE}" ]; then + if [ -f "${RABBITMQ_CONFIG_FILE}" ]; then + RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE_NOEX}" + fi +elif [ "${RABBITMQ_CONFIG_FILE_NOEX}.conf" == "${RABBITMQ_CONFIG_FILE}" ]; then + RABBITMQ_CONFIG_ARG="-conf ${RABBITMQ_CONFIG_FILE_NOEX} \ -conf_dir ${RABBITMQ_GENERATED_CONFIG_DIR} \ -conf_script_dir `dirname $0` \ -conf_schema_dir ${RABBITMQ_SCHEMA_DIR}" @@ -107,6 +112,21 @@ elif [ -f "${RABBITMQ_CONFIG_FILE}.conf" ]; then -conf_advanced ${RABBITMQ_ADVANCED_CONFIG_FILE} \ -config 
${RABBITMQ_ADVANCED_CONFIG_FILE}" fi +elif [ "${RABBITMQ_CONFIG_FILE_NOEX}" == "${RABBITMQ_CONFIG_FILE}" ]; then + if [ -f "${RABBITMQ_CONFIG_FILE}.config" ]; then + RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE_NOEX}" + elif [ -f "${RABBITMQ_CONFIG_FILE}.conf" ]; then + echo "NOEX CONF" + RABBITMQ_CONFIG_ARG="-conf ${RABBITMQ_CONFIG_FILE_NOEX} \ + -conf_dir ${RABBITMQ_GENERATED_CONFIG_DIR} \ + -conf_script_dir `dirname $0` \ + -conf_schema_dir ${RABBITMQ_SCHEMA_DIR}" + if [ -f "${RABBITMQ_ADVANCED_CONFIG_FILE}.config" ]; then + RABBITMQ_CONFIG_ARG="${RABBITMQ_CONFIG_ARG} \ + -conf_advanced ${RABBITMQ_ADVANCED_CONFIG_FILE} \ + -config ${RABBITMQ_ADVANCED_CONFIG_FILE}" + fi + fi fi RABBITMQ_LISTEN_ARG= diff --git a/scripts/rabbitmq-server.bat b/scripts/rabbitmq-server.bat index 2a38e77a82e9..034abd472194 100644 --- a/scripts/rabbitmq-server.bat +++ b/scripts/rabbitmq-server.bat @@ -42,8 +42,6 @@ if not exist "!ERLANG_HOME!\bin\erl.exe" ( set RABBITMQ_EBIN_ROOT=!RABBITMQ_HOME!\ebin -set RABBITMQ_CONFIG_FILE="!RABBITMQ_CONFIG_FILE!" - "!ERLANG_HOME!\bin\erl.exe" ^ -pa "!RABBITMQ_EBIN_ROOT!" ^ -noinput -hidden ^ @@ -68,10 +66,28 @@ if not exist "!RABBITMQ_SCHEMA_DIR!\rabbitmq.schema" ( set RABBITMQ_EBIN_PATH="-pa !RABBITMQ_EBIN_ROOT!" -if exist "!RABBITMQ_CONFIG_FILE!.config" ( - set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" -) else if exist "!RABBITMQ_CONFIG_FILE!.conf" ( - set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE!" ^ +CALL :get_noex !RABBITMQ_CONFIG_FILE! + +if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!" ( + if exist "!RABBITMQ_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE_NOEX!" + ) else if exist "!RABBITMQ_CONFIG_FILE!.conf" ( + set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE_NOEX!" ^ + -conf_dir !RABBITMQ_GENERATED_CONFIG_DIR! ^ + -conf_script_dir !CONF_SCRIPT_DIR:\=/! ^ + -conf_schema_dir !RABBITMQ_SCHEMA_DIR! 
+ if exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=!RABBITMQ_CONFIG_ARG! ^ + -conf_advanced "!RABBITMQ_ADVANCED_CONFIG_FILE!" ^ + -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" + ) + ) +) else if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( + if exist "!RABBITMQ_CONFIG_FILE!" ( + set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE_NOEX!" + ) +) else if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.conf" ( + set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE_NOEX!" ^ -conf_dir !RABBITMQ_GENERATED_CONFIG_DIR! ^ -conf_script_dir !CONF_SCRIPT_DIR:\=/! ^ -conf_schema_dir !RABBITMQ_SCHEMA_DIR! @@ -80,7 +96,7 @@ if exist "!RABBITMQ_CONFIG_FILE!.config" ( -conf_advanced "!RABBITMQ_ADVANCED_CONFIG_FILE!" ^ -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" ) -) +) set RABBITMQ_LISTEN_ARG= if not "!RABBITMQ_NODE_IP_ADDRESS!"=="" ( @@ -114,7 +130,7 @@ if "!RABBITMQ_IO_THREAD_POOL_SIZE!"=="" ( set ENV_OK=true -CALL :check_not_empty "RABBITMQ_BOOT_MODULE" !RABBITMQ_BOOT_MODULE! +CALL :check_not_empty "RABBITMQ_BOOT_MODULE" !RABBITMQ_BOOT_MODULE! CALL :check_not_empty "RABBITMQ_NAME_TYPE" !RABBITMQ_NAME_TYPE! CALL :check_not_empty "RABBITMQ_NODENAME" !RABBITMQ_NODENAME! @@ -157,10 +173,14 @@ EXIT /B 0 if "%~2"=="" ( ECHO "Error: ENV variable should be defined: %1. Please check rabbitmq-env and rabbitmq-defaults, and !RABBITMQ_CONF_ENV_FILE! script files. Check also your Environment Variables settings" set ENV_OK=false - EXIT /B 78 + EXIT /B 78 ) EXIT /B 0 +:get_noex +set RABBITMQ_CONFIG_FILE_NOEX=%~dpn1 +EXIT /B 0 + endlocal endlocal diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index 7e80e78398dc..58e92eb36dd4 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -157,12 +157,32 @@ if not exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( echo []. 
> !RABBITMQ_ADVANCED_CONFIG_FILE!.config ) -if exist "!RABBITMQ_CONFIG_FILE!.config" ( - set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" -) else ( - rem Always specify generated config arguments, we cannot - rem assume .conf file is available - set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE!" ^ +CALL :get_noex !RABBITMQ_CONFIG_FILE! + +if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!" ( + if exist "!RABBITMQ_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE_NOEX!" + ) else if exist "!RABBITMQ_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" + ) else ( + rem Always specify generated config arguments, we cannot + rem assume .conf file is available + set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE!" ^ + -conf_dir !RABBITMQ_GENERATED_CONFIG_DIR! ^ + -conf_script_dir !CONF_SCRIPT_DIR:\=/! ^ + -conf_schema_dir !RABBITMQ_SCHEMA_DIR! + if exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=!RABBITMQ_CONFIG_ARG! ^ + -conf_advanced "!RABBITMQ_ADVANCED_CONFIG_FILE!" ^ + -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" + ) + ) +) else if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( + if exist "!RABBITMQ_CONFIG_FILE!" ( + set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE_NOEX!" + ) +) else if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.conf" ( + set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE_NOEX!" ^ -conf_dir !RABBITMQ_GENERATED_CONFIG_DIR! ^ -conf_script_dir !CONF_SCRIPT_DIR:\=/! ^ -conf_schema_dir !RABBITMQ_SCHEMA_DIR! 
@@ -173,6 +193,7 @@ if exist "!RABBITMQ_CONFIG_FILE!.config" ( ) ) + set RABBITMQ_LISTEN_ARG= if not "!RABBITMQ_NODE_IP_ADDRESS!"=="" ( if not "!RABBITMQ_NODE_PORT!"=="" ( @@ -267,5 +288,9 @@ if "%~2"=="" ( ) EXIT /B 0 +:get_noex +set RABBITMQ_CONFIG_FILE_NOEX=%~dpn1 +EXIT /B 0 + endlocal endlocal diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 3f83a153eaae..e87266320028 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -79,35 +79,39 @@ duplicate_node_check(NodeName, NodeHost) -> end. dist_port_set_check() -> - case os:getenv("RABBITMQ_CONFIG_FILE") of - false -> + case get_config(os:getenv("RABBITMQ_CONFIG_FILE")) of + {ok, [Config]} -> + Kernel = pget(kernel, Config, []), + case {pget(inet_dist_listen_min, Kernel, none), + pget(inet_dist_listen_max, Kernel, none)} of + {none, none} -> ok; + _ -> rabbit_misc:quit(?DO_NOT_SET_DIST_PORT) + end; + {ok, _} -> ok; - File -> - case get_config(File) of - {ok, [Config]} -> - Kernel = pget(kernel, Config, []), - case {pget(inet_dist_listen_min, Kernel, none), - pget(inet_dist_listen_max, Kernel, none)} of - {none, none} -> ok; - _ -> rabbit_misc:quit(?DO_NOT_SET_DIST_PORT) - end; - {ok, _} -> - ok; - {error, _} -> - ok - end + {error, _} -> + ok end. -get_config(File) -> - case rabbit_file:is_file(File ++ ".config") of - true -> file:consult(File ++ ".config"); - false -> +get_config(File) -> + case consult_file(File) of + {ok, Contents} -> {ok, Contents}; + {error, _} -> case rabbit_config:get_advanced_config() of none -> {error, enoent}; FileName -> file:consult(FileName) end end. +consult_file(false) -> {error, nofile}; +consult_file(File) -> + FileName = case filename:extension(File) of + "" -> File ++ ".config"; + ".config" -> File; + _ -> "" + end, + file:consult(FileName). 
+ dist_port_range_check() -> case os:getenv("RABBITMQ_DIST_PORT") of false -> ok; From a610386356823523f5cd93638de2646d8126baff Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Tue, 29 Mar 2016 14:15:44 +0100 Subject: [PATCH 008/174] Remove debug echoes --- scripts/rabbitmq-server | 2 -- 1 file changed, 2 deletions(-) diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server index af74e0115ec5..0a67e00415b8 100755 --- a/scripts/rabbitmq-server +++ b/scripts/rabbitmq-server @@ -96,7 +96,6 @@ fi set -e RABBITMQ_CONFIG_FILE_NOEX="${RABBITMQ_CONFIG_FILE%.*}" -echo "NOEX: ${RABBITMQ_CONFIG_FILE_NOEX}" if [ "${RABBITMQ_CONFIG_FILE_NOEX}.config" == "${RABBITMQ_CONFIG_FILE}" ]; then if [ -f "${RABBITMQ_CONFIG_FILE}" ]; then @@ -116,7 +115,6 @@ elif [ "${RABBITMQ_CONFIG_FILE_NOEX}" == "${RABBITMQ_CONFIG_FILE}" ]; then if [ -f "${RABBITMQ_CONFIG_FILE}.config" ]; then RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE_NOEX}" elif [ -f "${RABBITMQ_CONFIG_FILE}.conf" ]; then - echo "NOEX CONF" RABBITMQ_CONFIG_ARG="-conf ${RABBITMQ_CONFIG_FILE_NOEX} \ -conf_dir ${RABBITMQ_GENERATED_CONFIG_DIR} \ -conf_script_dir `dirname $0` \ From 41ce5ad808863944cd6d62ce7f7e2271f1010582 Mon Sep 17 00:00:00 2001 From: Alexey Lebedeff Date: Wed, 30 Mar 2016 17:27:23 +0300 Subject: [PATCH 009/174] Guess thread pool size on startup Fixes #151 --- scripts/rabbitmq-defaults | 1 - scripts/rabbitmq-server | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) mode change 100644 => 100755 scripts/rabbitmq-defaults diff --git a/scripts/rabbitmq-defaults b/scripts/rabbitmq-defaults old mode 100644 new mode 100755 index c5d87822a24c..baffce80de49 --- a/scripts/rabbitmq-defaults +++ b/scripts/rabbitmq-defaults @@ -40,6 +40,5 @@ MNESIA_BASE=${SYS_PREFIX}/var/lib/rabbitmq/mnesia ENABLED_PLUGINS_FILE=${SYS_PREFIX}/etc/rabbitmq/enabled_plugins PLUGINS_DIR="${RABBITMQ_HOME}/plugins" -IO_THREAD_POOL_SIZE=64 CONF_ENV_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq-env.conf diff --git 
a/scripts/rabbitmq-server b/scripts/rabbitmq-server index 668867abfbb1..8ed7319b41ef 100755 --- a/scripts/rabbitmq-server +++ b/scripts/rabbitmq-server @@ -117,7 +117,23 @@ fi # there is no other way of preventing their expansion. set -f +# Lazy initialization of threed pool size - if it wasn't set +# explicitly. This parameter is only needed when server is starting, +# so it makes no sense to do this calculations in rabbitmq-env or +# rabbitmq-defaults scripts. +ensure_thread_pool_size() { + if [ -z "${RABBITMQ_IO_THREAD_POOL_SIZE}" ]; then + RABBITMQ_IO_THREAD_POOL_SIZE=$( + ${ERL_DIR}erl -pa "$RABBITMQ_EBIN_ROOT" \ + -boot "${CLEAN_BOOT_FILE}" \ + -noinput \ + -s rabbit_misc report_default_thread_pool_size + ) + fi +} + start_rabbitmq_server() { + ensure_thread_pool_size check_start_params && RABBITMQ_CONFIG_FILE=$RABBITMQ_CONFIG_FILE \ exec ${ERL_DIR}erl \ From 93dbc2ff09234740caedb4e9cd9610f79c7a4e9d Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Thu, 31 Mar 2016 02:51:02 +0300 Subject: [PATCH 010/174] Strip off trailing .conf/.config using dirname and basename Otherwise if there is no dot in RABBITMQ_CONFIG_FILE, RABBITMQ_CONFIG_FILE_NOEX ends up being empty. 
--- scripts/rabbitmq-server | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server index 0a67e00415b8..7853fea2f4e3 100755 --- a/scripts/rabbitmq-server +++ b/scripts/rabbitmq-server @@ -95,7 +95,15 @@ fi set -e -RABBITMQ_CONFIG_FILE_NOEX="${RABBITMQ_CONFIG_FILE%.*}" +strip_trailing_config_or_conf() { + local dir=$(dirname $1) + local filename_without_conf=$(basename $1 .conf) + local filename_without_config=$(basename $filename_without_conf .config) + + echo "$dir/$filename_without_config" +} + +RABBITMQ_CONFIG_FILE_NOEX=$(strip_trailing_config_or_conf $RABBITMQ_CONFIG_FILE) if [ "${RABBITMQ_CONFIG_FILE_NOEX}.config" == "${RABBITMQ_CONFIG_FILE}" ]; then if [ -f "${RABBITMQ_CONFIG_FILE}" ]; then From a4496d44e5304372d0448baa91ff26fd07efe1db Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Thu, 31 Mar 2016 10:13:26 +0100 Subject: [PATCH 011/174] Condition ordering to set RABBITMQ_CONFIG_ARG --- scripts/rabbitmq-server | 8 ++++---- scripts/rabbitmq-server.bat | 32 ++++++++++++++++---------------- scripts/rabbitmq-service.bat | 33 +++++++++++++++++---------------- 3 files changed, 37 insertions(+), 36 deletions(-) diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server index 7853fea2f4e3..24c035d92a3a 100755 --- a/scripts/rabbitmq-server +++ b/scripts/rabbitmq-server @@ -77,7 +77,7 @@ RABBITMQ_DIST_PORT=$RABBITMQ_DIST_PORT \ -rabbit enabled_plugins_file "\"$RABBITMQ_ENABLED_PLUGINS_FILE\"" \ -rabbit plugins_dir "\"$RABBITMQ_PLUGINS_DIR\"" \ -extra "${RABBITMQ_NODENAME}" - + PRELAUNCH_RESULT=$? 
if [ ${PRELAUNCH_RESULT} = 2 ] ; then # dist port is mentioned in config, so do not set it @@ -119,11 +119,11 @@ elif [ "${RABBITMQ_CONFIG_FILE_NOEX}.conf" == "${RABBITMQ_CONFIG_FILE}" ]; then -conf_advanced ${RABBITMQ_ADVANCED_CONFIG_FILE} \ -config ${RABBITMQ_ADVANCED_CONFIG_FILE}" fi -elif [ "${RABBITMQ_CONFIG_FILE_NOEX}" == "${RABBITMQ_CONFIG_FILE}" ]; then +else if [ -f "${RABBITMQ_CONFIG_FILE}.config" ]; then - RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE_NOEX}" + RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE}" elif [ -f "${RABBITMQ_CONFIG_FILE}.conf" ]; then - RABBITMQ_CONFIG_ARG="-conf ${RABBITMQ_CONFIG_FILE_NOEX} \ + RABBITMQ_CONFIG_ARG="-conf ${RABBITMQ_CONFIG_FILE} \ -conf_dir ${RABBITMQ_GENERATED_CONFIG_DIR} \ -conf_script_dir `dirname $0` \ -conf_schema_dir ${RABBITMQ_SCHEMA_DIR}" diff --git a/scripts/rabbitmq-server.bat b/scripts/rabbitmq-server.bat index 034abd472194..cae876f1d2ac 100644 --- a/scripts/rabbitmq-server.bat +++ b/scripts/rabbitmq-server.bat @@ -68,21 +68,7 @@ set RABBITMQ_EBIN_PATH="-pa !RABBITMQ_EBIN_ROOT!" CALL :get_noex !RABBITMQ_CONFIG_FILE! -if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!" ( - if exist "!RABBITMQ_CONFIG_FILE!.config" ( - set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE_NOEX!" - ) else if exist "!RABBITMQ_CONFIG_FILE!.conf" ( - set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE_NOEX!" ^ - -conf_dir !RABBITMQ_GENERATED_CONFIG_DIR! ^ - -conf_script_dir !CONF_SCRIPT_DIR:\=/! ^ - -conf_schema_dir !RABBITMQ_SCHEMA_DIR! - if exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( - set RABBITMQ_CONFIG_ARG=!RABBITMQ_CONFIG_ARG! ^ - -conf_advanced "!RABBITMQ_ADVANCED_CONFIG_FILE!" ^ - -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" - ) - ) -) else if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( +if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( if exist "!RABBITMQ_CONFIG_FILE!" ( set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE_NOEX!" 
) @@ -96,6 +82,20 @@ if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!" ( -conf_advanced "!RABBITMQ_ADVANCED_CONFIG_FILE!" ^ -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" ) +) else ( + if exist "!RABBITMQ_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" + ) else if exist "!RABBITMQ_CONFIG_FILE!.conf" ( + set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE!" ^ + -conf_dir !RABBITMQ_GENERATED_CONFIG_DIR! ^ + -conf_script_dir !CONF_SCRIPT_DIR:\=/! ^ + -conf_schema_dir !RABBITMQ_SCHEMA_DIR! + if exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=!RABBITMQ_CONFIG_ARG! ^ + -conf_advanced "!RABBITMQ_ADVANCED_CONFIG_FILE!" ^ + -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" + ) + ) ) set RABBITMQ_LISTEN_ARG= @@ -126,7 +126,7 @@ if "!RABBITMQ_NODE_ONLY!"=="" ( if "!RABBITMQ_IO_THREAD_POOL_SIZE!"=="" ( set RABBITMQ_IO_THREAD_POOL_SIZE=64 -) +) set ENV_OK=true diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index 58e92eb36dd4..e635cb689679 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -159,9 +159,24 @@ if not exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( CALL :get_noex !RABBITMQ_CONFIG_FILE! -if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!" ( - if exist "!RABBITMQ_CONFIG_FILE!.config" ( + +if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( + if exist "!RABBITMQ_CONFIG_FILE!" ( set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE_NOEX!" + ) +) else if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.conf" ( + set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE_NOEX!" ^ + -conf_dir !RABBITMQ_GENERATED_CONFIG_DIR! ^ + -conf_script_dir !CONF_SCRIPT_DIR:\=/! ^ + -conf_schema_dir !RABBITMQ_SCHEMA_DIR! + if exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=!RABBITMQ_CONFIG_ARG! ^ + -conf_advanced "!RABBITMQ_ADVANCED_CONFIG_FILE!" ^ + -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" 
+ ) +) else ( + if exist "!RABBITMQ_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" ) else if exist "!RABBITMQ_CONFIG_FILE!.config" ( set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" ) else ( @@ -177,20 +192,6 @@ if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!" ( -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" ) ) -) else if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( - if exist "!RABBITMQ_CONFIG_FILE!" ( - set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE_NOEX!" - ) -) else if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.conf" ( - set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE_NOEX!" ^ - -conf_dir !RABBITMQ_GENERATED_CONFIG_DIR! ^ - -conf_script_dir !CONF_SCRIPT_DIR:\=/! ^ - -conf_schema_dir !RABBITMQ_SCHEMA_DIR! - if exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( - set RABBITMQ_CONFIG_ARG=!RABBITMQ_CONFIG_ARG! ^ - -conf_advanced "!RABBITMQ_ADVANCED_CONFIG_FILE!" ^ - -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" 
- ) ) From bbb2695a040cca5c3a1d9b04b2edc34fb6d1fd13 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Thu, 31 Mar 2016 11:08:26 +0100 Subject: [PATCH 012/174] Equality operators and trimming extension --- scripts/rabbitmq-server | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server index 24c035d92a3a..2b26cc5ee8c2 100755 --- a/scripts/rabbitmq-server +++ b/scripts/rabbitmq-server @@ -95,21 +95,13 @@ fi set -e -strip_trailing_config_or_conf() { - local dir=$(dirname $1) - local filename_without_conf=$(basename $1 .conf) - local filename_without_config=$(basename $filename_without_conf .config) +RABBITMQ_CONFIG_FILE_NOEX="${RABBITMQ_CONFIG_FILE%.*}" - echo "$dir/$filename_without_config" -} - -RABBITMQ_CONFIG_FILE_NOEX=$(strip_trailing_config_or_conf $RABBITMQ_CONFIG_FILE) - -if [ "${RABBITMQ_CONFIG_FILE_NOEX}.config" == "${RABBITMQ_CONFIG_FILE}" ]; then +if [ "${RABBITMQ_CONFIG_FILE_NOEX}.config" = "${RABBITMQ_CONFIG_FILE}" ]; then if [ -f "${RABBITMQ_CONFIG_FILE}" ]; then RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE_NOEX}" fi -elif [ "${RABBITMQ_CONFIG_FILE_NOEX}.conf" == "${RABBITMQ_CONFIG_FILE}" ]; then +elif [ "${RABBITMQ_CONFIG_FILE_NOEX}.conf" = "${RABBITMQ_CONFIG_FILE}" ]; then RABBITMQ_CONFIG_ARG="-conf ${RABBITMQ_CONFIG_FILE_NOEX} \ -conf_dir ${RABBITMQ_GENERATED_CONFIG_DIR} \ -conf_script_dir `dirname $0` \ From 4f146b96fcbb27368bca4cbea793a42a774cee66 Mon Sep 17 00:00:00 2001 From: Brandon Shroyer Date: Thu, 31 Mar 2016 19:29:12 -0400 Subject: [PATCH 013/174] Allow memory-relative disk-free limits greater than 1.0 --- src/rabbit_disk_monitor.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_disk_monitor.erl b/src/rabbit_disk_monitor.erl index 124306487e8b..88a8096fd483 100644 --- a/src/rabbit_disk_monitor.erl +++ b/src/rabbit_disk_monitor.erl @@ -235,7 +235,7 @@ parse_free_win32(CommandResult) -> list_to_integer(lists:reverse(Free)). 
interpret_limit({mem_relative, Relative}) - when is_float(Relative), Relative < 1 -> + when is_float(Relative) -> round(Relative * vm_memory_monitor:get_total_memory()); interpret_limit(Absolute) -> case rabbit_resource_monitor_misc:parse_information_unit(Absolute) of From 4affec5cb796ecf54e8517312025f5804864d205 Mon Sep 17 00:00:00 2001 From: Ayanda Dube Date: Fri, 1 Apr 2016 15:59:02 +0100 Subject: [PATCH 014/174] Includes VHost to AuthProps. Ref: rabbitmq/rabbitmq-auth-backend-ldap#13 --- src/rabbit_direct.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl index 35d7eb7940f4..b5970274d422 100644 --- a/src/rabbit_direct.erl +++ b/src/rabbit_direct.erl @@ -76,8 +76,8 @@ connect({Username, none}, VHost, Protocol, Pid, Infos) -> VHost, Protocol, Pid, Infos); connect({Username, Password}, VHost, Protocol, Pid, Infos) -> - connect0(fun () -> rabbit_access_control:check_user_pass_login( - Username, Password) end, + connect0(fun () -> rabbit_access_control:check_user_login( + Username, [{password, Password}, {vhost, VHost}]) end, VHost, Protocol, Pid, Infos). 
connect0(AuthFun, VHost, Protocol, Pid, Infos) -> From 28826181bd2cef140fb8e9fe5b68da5a84835218 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Fri, 1 Apr 2016 18:20:18 +0100 Subject: [PATCH 015/174] Validate plugins before enabling --- src/rabbit.app.src | 2 +- src/rabbit_plugins.erl | 123 ++++++++++++++++++++---------------- src/rabbit_plugins_main.erl | 23 +++++++ 3 files changed, 92 insertions(+), 56 deletions(-) diff --git a/src/rabbit.app.src b/src/rabbit.app.src index 83e7237c806d..f40622c32c55 100644 --- a/src/rabbit.app.src +++ b/src/rabbit.app.src @@ -1,7 +1,7 @@ {application, rabbit, %% -*- erlang -*- [{description, "RabbitMQ"}, {id, "RabbitMQ"}, - {vsn, "0.0.0"}, + {vsn, "3.7.0"}, {modules, []}, {registered, [rabbit_amqqueue_sup, rabbit_log, diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index cf14a2efe22d..701266cab3a7 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -152,9 +152,7 @@ list(PluginsDir, IncludeRequiredDeps) -> lists:foldl( fun ({error, EZ, Reason}, {Plugins1, Problems1}) -> {Plugins1, [{EZ, Reason} | Problems1]}; - (Plugin = #plugin{name = Name, - rabbitmq_versions = Versions, - plugins_versions = PluginsVersions}, + (Plugin = #plugin{name = Name}, {Plugins1, Problems1}) -> %% Applications RabbitMQ depends on (eg. %% "rabbit_common") can't be considered @@ -163,21 +161,7 @@ list(PluginsDir, IncludeRequiredDeps) -> %% disable them. 
case IncludeRequiredDeps orelse not lists:member(Name, RabbitDeps) of - true -> - RabbitVersion = case application:get_key(rabbit, - vsn) of - undefined -> "0.0.0"; - {ok, Val} -> Val - end, - RabbitVersionValid = version_support(RabbitVersion, Versions), - DepsVersionsValid = check_plugins_versions(AllPlugins, PluginsVersions), - case [RabbitVersionValid, DepsVersionsValid] of - [ok, ok] -> - {[Plugin|Plugins1], Problems1}; - Errs -> - Errors = [Err || Err <- Errs, Err =/= ok], - {Plugins1, [{Name, Errors} | Problems1]} - end; + true -> {[Plugin|Plugins1], Problems1}; false -> {Plugins1, Problems1} end end, {[], []}, @@ -187,44 +171,11 @@ list(PluginsDir, IncludeRequiredDeps) -> _ -> rabbit_log:warning( "Problem reading some plugins: ~p~n", [Problems]) end, + Plugins = lists:filter(fun(P) -> not plugin_provided_by_otp(P) end, AvailablePlugins), ensure_dependencies(Plugins). -check_plugins_versions(AllPlugins, RequiredVersions) -> - ExistingVersions = [{Name, Vsn} - || #plugin{name = Name, version = Vsn} <- AllPlugins], - Problems = lists:foldl( - fun({Name, Versions}, Acc) -> - case proplists:get_value(Name, ExistingVersions) of - undefined -> [{missing_dependency, Name} | Acc]; - Version -> - case version_support(Version, Versions) of - {error, Err} -> [{Err, Name} | Acc]; - ok -> Acc - end - end - end, - [], - RequiredVersions), - case Problems of - [] -> ok; - _ -> {error, Problems} - end. - - -version_support(_RabbitVersion, []) -> ok; -version_support(RabbitVersion, Versions) -> - case lists:any(fun(V) -> - rabbit_misc:version_minor_equivalent(V, RabbitVersion) - andalso - rabbit_misc:version_compare(V, RabbitVersion, lte) - end, - Versions) of - true -> ok; - false -> {error, {version_mismatch, {RabbitVersion, Versions}}} - end. - %% @doc Read the list of enabled plugins from the supplied term file. 
read_enabled(PluginsFile) -> case rabbit_file:read_term_file(PluginsFile) of @@ -250,8 +201,9 @@ dependencies(Reverse, Sources, AllPlugins) -> false -> digraph_utils:reachable(Sources, G); true -> digraph_utils:reaching(Sources, G) end, + OrderedDests = digraph_utils:postorder(digraph_utils:subgraph(G, Dests)), true = digraph:delete(G), - Dests. + OrderedDests. %% For a few known cases, an externally provided plugin can be trusted. %% In this special case, it overrides the plugin. @@ -299,7 +251,12 @@ prepare_plugins(Enabled) -> AllPlugins = list(PluginsDistDir), Wanted = dependencies(false, Enabled, AllPlugins), WantedPlugins = lookup_plugins(Wanted, AllPlugins), - + RabbitVersion = RabbitVersion = case application:get_key(rabbit, vsn) of + undefined -> "0.0.0"; + {ok, Val} -> Val + end, + {ValidPlugins, Problems} = validate_plugins(WantedPlugins, RabbitVersion), + rabbit_log:error("Valid ~p~n Invalid ~p", [ValidPlugins, Problems]), case filelib:ensure_dir(ExpandDir ++ "/") of ok -> ok; {error, E2} -> throw({error, {cannot_create_plugins_expand_dir, @@ -312,6 +269,57 @@ prepare_plugins(Enabled) -> PluginAppDescPath <- filelib:wildcard(ExpandDir ++ "/*/ebin/*.app")], Wanted. +validate_plugins(WantedPlugins, RabbitVersion) -> + lists:foldl( + fun(#plugin{name = Name, + rabbitmq_versions = RabbitmqVersions, + plugins_versions = PluginsVersions} = Plugin, + {Plugins, Errors}) -> + case version_support(RabbitVersion, RabbitmqVersions) of + {error, Err} -> {Plugins, [{Name, Err} | Errors]}; + ok -> + case check_plugins_versions(Plugins, PluginsVersions) of + ok -> {[Plugin | Plugins], Errors}; + {error, Err} -> {Plugins, [{Name, Err} | Errors]} + end + end + end, + {[],[]}, + WantedPlugins). 
+ +check_plugins_versions(AllPlugins, RequiredVersions) -> + ExistingVersions = [{Name, Vsn} + || #plugin{name = Name, version = Vsn} <- AllPlugins], + Problems = lists:foldl( + fun({Name, Versions}, Acc) -> + case proplists:get_value(Name, ExistingVersions) of + undefined -> [{missing_dependency, Name} | Acc]; + Version -> + case version_support(Version, Versions) of + {error, Err} -> [{Err, Name} | Acc]; + ok -> Acc + end + end + end, + [], + RequiredVersions), + case Problems of + [] -> ok; + _ -> {error, Problems} + end. + +version_support(_Version, []) -> ok; +version_support(Version, ExpectedVersions) -> + case lists:any(fun(ExpectedVersion) -> + rabbit_misc:version_minor_equivalent(ExpectedVersion, Version) + andalso + rabbit_misc:version_compare(ExpectedVersion, Version, lte) + end, + ExpectedVersions) of + true -> ok; + false -> {error, {version_mismatch, {Version, ExpectedVersions}}} + end. + clean_plugins(Plugins) -> {ok, ExpandDir} = application:get_env(rabbit, plugins_expand_dir), [clean_plugin(Plugin, ExpandDir) || Plugin <- Plugins]. @@ -424,4 +432,9 @@ plugin_names(Plugins) -> [Name || #plugin{name = Name} <- Plugins]. lookup_plugins(Names, AllPlugins) -> - [P || P = #plugin{name = Name} <- AllPlugins, lists:member(Name, Names)]. + % Preserve order of Names + lists:map( + fun(Name) -> + lists:keyfind(Name, #plugin.name, AllPlugins) + end, + Names). 
diff --git a/src/rabbit_plugins_main.erl b/src/rabbit_plugins_main.erl index e248989a7a90..6c4d35c58f6e 100644 --- a/src/rabbit_plugins_main.erl +++ b/src/rabbit_plugins_main.erl @@ -99,6 +99,11 @@ action(enable, Node, ToEnable0, Opts, State = #cli{all = All, _ -> throw({error_string, fmt_missing(Missing)}) end, NewEnabled = lists:usort(Enabled ++ ToEnable), + Invalid = validate_plugins(NewEnabled, State), + case Invalid of + [] -> ok; + _ -> throw({error_string, fmt_invalid(Invalid)}) + end, NewImplicit = write_enabled_plugins(NewEnabled, State), case NewEnabled -- Implicit of [] -> io:format("Plugin configuration unchanged.~n"); @@ -115,6 +120,11 @@ action(set, Node, NewEnabled0, Opts, State = #cli{all = All, [] -> ok; _ -> throw({error_string, fmt_missing(Missing)}) end, + Invalid = validate_plugins(NewEnabled, State), + case Invalid of + [] -> ok; + _ -> throw({error_string, fmt_invalid(Invalid)}) + end, NewImplicit = write_enabled_plugins(NewEnabled, State), case NewImplicit of [] -> io:format("All plugins are now disabled.~n"); @@ -155,6 +165,19 @@ action(help, _Node, _Args, _Opts, _State) -> %%---------------------------------------------------------------------------- +validate_plugins(Names, #cli{all = All}) -> + Deps = rabbit_plugins:dependencies(false, Names, All), + DepsPlugins = lists:map( + fun(Name) -> + lists:keyfind(Name, #plugin.name, All) + end, + Deps), + {_, Errors} = rabbit_plugins:validate_plugins(DepsPlugins, "0.0.0"), + Errors. + +fmt_invalid(Errors) -> + lists:flatten(io_lib:format("Problem reading some plugins: ~p~n", [Errors])). + %% Pretty print a list of plugins. 
format_plugins(Node, Pattern, Opts, #cli{all = All, enabled = Enabled, From 4476b4b2919917476d16f6a5f1ab2418852c0cbc Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Wed, 30 Mar 2016 17:09:33 +0100 Subject: [PATCH 016/174] Ignore OTP version in clustering --- src/rabbit_mnesia.erl | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index afd0508aac2d..6106a110d8f0 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -765,13 +765,11 @@ change_extra_db_nodes(ClusterNodes0, CheckOtherNodes) -> check_consistency(OTP, Rabbit) -> rabbit_misc:sequence_error( - [rabbit_version:check_otp_consistency(OTP), - check_rabbit_consistency(Rabbit)]). + [check_rabbit_consistency(Rabbit)]). check_consistency(OTP, Rabbit, Node, Status) -> rabbit_misc:sequence_error( - [rabbit_version:check_otp_consistency(OTP), - check_rabbit_consistency(Rabbit), + [check_rabbit_consistency(Rabbit), check_nodes_consistency(Node, Status)]). check_nodes_consistency(Node, RemoteStatus = {RemoteAllNodes, _, _}) -> From d2780fe4db57217fef31eb7fdf287c1cfe4d8483 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Thu, 31 Mar 2016 16:56:15 +0100 Subject: [PATCH 017/174] Check mnesia consistency by negotiating protocol --- src/rabbit_mnesia.erl | 92 +++++++++++++++++++++++++++++++++---------- 1 file changed, 71 insertions(+), 21 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 6106a110d8f0..96c9bffdc898 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -423,6 +423,7 @@ cluster_status(WhichNodes) -> node_info() -> {rabbit_misc:otp_release(), rabbit_misc:version(), + mnesia:system_info(protocol_version), cluster_status_from_mnesia()}. 
node_type() -> @@ -596,21 +597,21 @@ check_cluster_consistency(Node, CheckNodesConsistency) -> case rpc:call(Node, rabbit_mnesia, node_info, []) of {badrpc, _Reason} -> {error, not_found}; - {_OTP, _Rabbit, {error, _}} -> + {_OTP, Rabbit, Hash, _Status} when is_binary(Hash) -> + %% delegate hash checking implies version mismatch + rabbit_version:version_error("Rabbit", rabbit_misc:version(), Rabbit); + {_OTP, _Rabbit, _Protocol, {error, _}} -> {error, not_found}; - {OTP, Rabbit, {ok, Status}} when CheckNodesConsistency -> - case check_consistency(OTP, Rabbit, Node, Status) of + {_OTP, Rabbit, Protocol, {ok, Status}} when CheckNodesConsistency -> + case check_consistency(Node, Rabbit, Protocol, Status) of {error, _} = E -> E; {ok, Res} -> {ok, Res} end; - {OTP, Rabbit, {ok, Status}} -> - case check_consistency(OTP, Rabbit) of + {_OTP, Rabbit, Protocol, {ok, Status}} -> + case check_consistency(Node, Rabbit, Protocol) of {error, _} = E -> E; ok -> {ok, Status} - end; - {_OTP, Rabbit, _Hash, _Status} -> - %% delegate hash checking implies version mismatch - rabbit_version:version_error("Rabbit", rabbit_misc:version(), Rabbit) + end end. %%-------------------------------------------------------------------- @@ -763,13 +764,15 @@ change_extra_db_nodes(ClusterNodes0, CheckOtherNodes) -> Nodes end. -check_consistency(OTP, Rabbit) -> +check_consistency(Node, Rabbit, ProtocolVersion) -> rabbit_misc:sequence_error( - [check_rabbit_consistency(Rabbit)]). + [check_mnesia_consistency(Node, ProtocolVersion), + check_rabbit_consistency(Rabbit)]). -check_consistency(OTP, Rabbit, Node, Status) -> +check_consistency(Node, Rabbit, ProtocolVersion, Status) -> rabbit_misc:sequence_error( - [check_rabbit_consistency(Rabbit), + [check_mnesia_consistency(Node, ProtocolVersion), + check_rabbit_consistency(Rabbit), check_nodes_consistency(Node, Status)]). 
check_nodes_consistency(Node, RemoteStatus = {RemoteAllNodes, _, _}) -> @@ -783,6 +786,49 @@ check_nodes_consistency(Node, RemoteStatus = {RemoteAllNodes, _, _}) -> [node(), Node, Node])}} end. +check_mnesia_consistency(Node, ProtocolVersion) -> + % If mnesia is running we will just check protocol version + % If it's not running, we don't want it to join cluster until all checks pass + % so we start it without `dir` env variable to prevent + % joining cluster and/or corrupting data + with_running_or_clean_mnesia(fun() -> + case negotiate_protocol([Node]) of + [Node] -> ok; + [] -> + LocalVersion = mnesia:system_info(protocol_version), + {error, {inconsistent_cluster, + rabbit_misc:format("Mnesia protocol negotiation failed." + " Local version: ~p." + " Remote version ~p", + [LocalVersion, ProtocolVersion])}} + end + end). + +negotiate_protocol([Node]) -> + mnesia_monitor:negotiate_protocol([Node]). + +with_running_or_clean_mnesia(Fun) -> + MnesiaRunning = case mnesia:system_info(is_running) of + stopping -> + ensure_mnesia_not_running(), + no; + starting -> + ensure_mnesia_running(), + yes; + Other -> Other + end, + case MnesiaRunning of + yes -> Fun(); + no -> + {ok, MnesiaDir} = application:get_env(mnesia, dir), + application:unset_env(mnesia, dir), + mnesia:start(), + Result = Fun(), + application:stop(mnesia), + application:set_env(mnesia, dir, MnesiaDir), + Result + end. 
+ check_rabbit_consistency(Remote) -> rabbit_version:check_version_consistency( rabbit_misc:version(), Remote, "Rabbit", @@ -818,15 +864,19 @@ find_auto_cluster_node([Node | Nodes]) -> find_auto_cluster_node(Nodes) end, case rpc:call(Node, rabbit_mnesia, node_info, []) of - {badrpc, _} = Reason -> Fail("~p~n", [Reason]); + {badrpc, _} = Reason -> + Fail("~p~n", [Reason]); %% old delegate hash check - {_OTP, RMQ, _Hash, _} -> Fail("version ~s~n", [RMQ]); - {_OTP, _RMQ, {error, _} = E} -> Fail("~p~n", [E]); - {OTP, RMQ, _} -> case check_consistency(OTP, RMQ) of - {error, _} -> Fail("versions ~p~n", - [{OTP, RMQ}]); - ok -> {ok, Node} - end + {_OTP, RMQ, Hash, _} when is_binary(Hash) -> + Fail("version ~s~n", [RMQ]); + {_OTP, _RMQ, _Protocol, {error, _} = E} -> + Fail("~p~n", [E]); + {OTP, RMQ, Protocol, _} -> + case check_consistency(Node, RMQ, Protocol) of + {error, _} -> Fail("versions ~p~n", + [{OTP, RMQ}]); + ok -> {ok, Node} + end end. is_only_clustered_disc_node() -> From 11fa86d24ac8ddafb3e642aeba4e1b4fc45adea6 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Fri, 1 Apr 2016 18:39:49 +0100 Subject: [PATCH 018/174] Support extension in advanced.config --- scripts/rabbitmq-server | 5 +++++ scripts/rabbitmq-server.bat | 9 +++++++-- scripts/rabbitmq-service.bat | 9 ++++++--- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server index 2b26cc5ee8c2..b5c39337a86d 100755 --- a/scripts/rabbitmq-server +++ b/scripts/rabbitmq-server @@ -62,6 +62,11 @@ RABBITMQ_EBIN_ROOT="${RABBITMQ_HOME}/ebin" set +e +RABBITMQ_ADVANCED_CONFIG_FILE_NOEX="${RABBITMQ_ADVANCED_CONFIG_FILE%.*}" +if [ "${RABBITMQ_ADVANCED_CONFIG_FILE_NOEX}.config" = "${RABBITMQ_ADVANCED_CONFIG_FILE}" ]; then + RABBITMQ_ADVANCED_CONFIG_FILE="${RABBITMQ_ADVANCED_CONFIG_FILE_NOEX}" +fi + # NOTIFY_SOCKET is needed here to prevent epmd from impersonating the # success of our startup sequence to systemd. 
NOTIFY_SOCKET= \ diff --git a/scripts/rabbitmq-server.bat b/scripts/rabbitmq-server.bat index cae876f1d2ac..8718197471d7 100644 --- a/scripts/rabbitmq-server.bat +++ b/scripts/rabbitmq-server.bat @@ -42,6 +42,11 @@ if not exist "!ERLANG_HOME!\bin\erl.exe" ( set RABBITMQ_EBIN_ROOT=!RABBITMQ_HOME!\ebin +for %%NOEX in (CALL :get_noex !RABBITMQ_ADVANCED_CONFIG_FILE!) do RABBITMQ_ADVANCED_CONFIG_FILE_NOEX=%%NOEX +if "!RABBITMQ_ADVANCED_CONFIG_FILE!" == "!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX!.config" ( + RABBITMQ_ADVANCED_CONFIG_FILE=!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX! +) + "!ERLANG_HOME!\bin\erl.exe" ^ -pa "!RABBITMQ_EBIN_ROOT!" ^ -noinput -hidden ^ @@ -66,7 +71,7 @@ if not exist "!RABBITMQ_SCHEMA_DIR!\rabbitmq.schema" ( set RABBITMQ_EBIN_PATH="-pa !RABBITMQ_EBIN_ROOT!" -CALL :get_noex !RABBITMQ_CONFIG_FILE! +for %%NOEX in (CALL :get_noex !RABBITMQ_CONFIG_FILE!) do RABBITMQ_CONFIG_FILE_NOEX=%%NOEX if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( if exist "!RABBITMQ_CONFIG_FILE!" ( @@ -178,7 +183,7 @@ if "%~2"=="" ( EXIT /B 0 :get_noex -set RABBITMQ_CONFIG_FILE_NOEX=%~dpn1 +echo %~dpn1 EXIT /B 0 endlocal diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index e635cb689679..ed1829c0d621 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -126,6 +126,10 @@ set RABBITMQ_EBIN_ROOT=!RABBITMQ_HOME!\ebin set RABBITMQ_CONFIG_FILE="!RABBITMQ_CONFIG_FILE!" +for %%NOEX in (CALL :get_noex !RABBITMQ_ADVANCED_CONFIG_FILE!) do RABBITMQ_ADVANCED_CONFIG_FILE_NOEX=%%NOEX +if "!RABBITMQ_ADVANCED_CONFIG_FILE!" == "!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX!.config" ( + RABBITMQ_ADVANCED_CONFIG_FILE=!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX! +) "!ERLANG_HOME!\bin\erl.exe" ^ -pa "!RABBITMQ_EBIN_ROOT!" ^ @@ -157,8 +161,7 @@ if not exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( echo []. > !RABBITMQ_ADVANCED_CONFIG_FILE!.config ) -CALL :get_noex !RABBITMQ_CONFIG_FILE! - +for %%NOEX in (CALL :get_noex !RABBITMQ_CONFIG_FILE!) 
do RABBITMQ_CONFIG_FILE_NOEX=%%NOEX if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( if exist "!RABBITMQ_CONFIG_FILE!" ( @@ -290,7 +293,7 @@ if "%~2"=="" ( EXIT /B 0 :get_noex -set RABBITMQ_CONFIG_FILE_NOEX=%~dpn1 +echo %~dpn1 EXIT /B 0 endlocal From 5ebcb9b437940289b2a133d9d9c890124c2e176c Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Mon, 4 Apr 2016 11:46:37 +0100 Subject: [PATCH 019/174] Unquoting and extension trimming in windows scripts --- scripts/rabbitmq-env.bat | 3 ++- scripts/rabbitmq-server.bat | 8 ++++---- scripts/rabbitmq-service.bat | 9 +++++---- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/scripts/rabbitmq-env.bat b/scripts/rabbitmq-env.bat index c29ef5d5cccc..6ec8373f4c3c 100644 --- a/scripts/rabbitmq-env.bat +++ b/scripts/rabbitmq-env.bat @@ -168,11 +168,12 @@ if "!RABBITMQ_GENERATED_CONFIG_DIR!"=="" ( ) ) +CALL :unquote RABBITMQ_ADVANCED_CONFIG_FILE %RABBITMQ_ADVANCED_CONFIG_FILE% if "!RABBITMQ_ADVANCED_CONFIG_FILE!"=="" ( if "!ADVANCED_CONFIG_FILE!"=="" ( set RABBITMQ_ADVANCED_CONFIG_FILE=!RABBITMQ_BASE!\advanced ) else ( - set RABBITMQ_ADVANCED_CONFIG_FILE=!GENERATED_CONFIG_DIR! + set RABBITMQ_ADVANCED_CONFIG_FILE=!ADVANCED_CONFIG_FILE! ) ) diff --git a/scripts/rabbitmq-server.bat b/scripts/rabbitmq-server.bat index 8718197471d7..d87dc9d5fbde 100644 --- a/scripts/rabbitmq-server.bat +++ b/scripts/rabbitmq-server.bat @@ -42,9 +42,9 @@ if not exist "!ERLANG_HOME!\bin\erl.exe" ( set RABBITMQ_EBIN_ROOT=!RABBITMQ_HOME!\ebin -for %%NOEX in (CALL :get_noex !RABBITMQ_ADVANCED_CONFIG_FILE!) do RABBITMQ_ADVANCED_CONFIG_FILE_NOEX=%%NOEX +CALL :get_noex !RABBITMQ_ADVANCED_CONFIG_FILE! RABBITMQ_ADVANCED_CONFIG_FILE_NOEX if "!RABBITMQ_ADVANCED_CONFIG_FILE!" == "!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX!.config" ( - RABBITMQ_ADVANCED_CONFIG_FILE=!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX! + set RABBITMQ_ADVANCED_CONFIG_FILE=!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX! 
) "!ERLANG_HOME!\bin\erl.exe" ^ @@ -71,7 +71,7 @@ if not exist "!RABBITMQ_SCHEMA_DIR!\rabbitmq.schema" ( set RABBITMQ_EBIN_PATH="-pa !RABBITMQ_EBIN_ROOT!" -for %%NOEX in (CALL :get_noex !RABBITMQ_CONFIG_FILE!) do RABBITMQ_CONFIG_FILE_NOEX=%%NOEX +CALL :get_noex !RABBITMQ_CONFIG_FILE! RABBITMQ_CONFIG_FILE_NOEX if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( if exist "!RABBITMQ_CONFIG_FILE!" ( @@ -183,7 +183,7 @@ if "%~2"=="" ( EXIT /B 0 :get_noex -echo %~dpn1 +set "%~2=%~dpn1" EXIT /B 0 endlocal diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index ed1829c0d621..843336267225 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -126,9 +126,10 @@ set RABBITMQ_EBIN_ROOT=!RABBITMQ_HOME!\ebin set RABBITMQ_CONFIG_FILE="!RABBITMQ_CONFIG_FILE!" -for %%NOEX in (CALL :get_noex !RABBITMQ_ADVANCED_CONFIG_FILE!) do RABBITMQ_ADVANCED_CONFIG_FILE_NOEX=%%NOEX +CALL :get_noex !RABBITMQ_ADVANCED_CONFIG_FILE! RABBITMQ_ADVANCED_CONFIG_FILE_NOEX + if "!RABBITMQ_ADVANCED_CONFIG_FILE!" == "!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX!.config" ( - RABBITMQ_ADVANCED_CONFIG_FILE=!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX! + set RABBITMQ_ADVANCED_CONFIG_FILE=!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX! ) "!ERLANG_HOME!\bin\erl.exe" ^ @@ -161,7 +162,7 @@ if not exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( echo []. > !RABBITMQ_ADVANCED_CONFIG_FILE!.config ) -for %%NOEX in (CALL :get_noex !RABBITMQ_CONFIG_FILE!) do RABBITMQ_CONFIG_FILE_NOEX=%%NOEX +CALL :get_noex !RABBITMQ_CONFIG_FILE! RABBITMQ_CONFIG_FILE_NOEX if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( if exist "!RABBITMQ_CONFIG_FILE!" 
( @@ -293,7 +294,7 @@ if "%~2"=="" ( EXIT /B 0 :get_noex -echo %~dpn1 +set "%~2=%~dpn1" EXIT /B 0 endlocal From 47baba044ae832b9e3bb7c3862e54ffdcda48d28 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Mon, 4 Apr 2016 14:48:42 +0100 Subject: [PATCH 020/174] Error messages in rabbitmq-plugins --- src/rabbit_plugins.erl | 19 +++++++++++-------- src/rabbit_plugins_main.erl | 23 ++++++++++++++++++++--- 2 files changed, 31 insertions(+), 11 deletions(-) diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index 701266cab3a7..bb09be3370d4 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -251,24 +251,27 @@ prepare_plugins(Enabled) -> AllPlugins = list(PluginsDistDir), Wanted = dependencies(false, Enabled, AllPlugins), WantedPlugins = lookup_plugins(Wanted, AllPlugins), - RabbitVersion = RabbitVersion = case application:get_key(rabbit, vsn) of - undefined -> "0.0.0"; - {ok, Val} -> Val - end, - {ValidPlugins, Problems} = validate_plugins(WantedPlugins, RabbitVersion), - rabbit_log:error("Valid ~p~n Invalid ~p", [ValidPlugins, Problems]), + {ValidPlugins, Problems} = validate_plugins(WantedPlugins), + %TODO: do not enable invalid plugins case filelib:ensure_dir(ExpandDir ++ "/") of ok -> ok; {error, E2} -> throw({error, {cannot_create_plugins_expand_dir, [ExpandDir, E2]}}) end, - [prepare_plugin(Plugin, ExpandDir) || Plugin <- WantedPlugins], [prepare_dir_plugin(PluginAppDescPath) || PluginAppDescPath <- filelib:wildcard(ExpandDir ++ "/*/ebin/*.app")], Wanted. +validate_plugins(WantedPlugins) -> + application:load(rabbit), + RabbitVersion = RabbitVersion = case application:get_key(rabbit, vsn) of + undefined -> "0.0.0"; + {ok, Val} -> Val + end, + validate_plugins(WantedPlugins, RabbitVersion). 
+ validate_plugins(WantedPlugins, RabbitVersion) -> lists:foldl( fun(#plugin{name = Name, @@ -276,7 +279,7 @@ validate_plugins(WantedPlugins, RabbitVersion) -> plugins_versions = PluginsVersions} = Plugin, {Plugins, Errors}) -> case version_support(RabbitVersion, RabbitmqVersions) of - {error, Err} -> {Plugins, [{Name, Err} | Errors]}; + {error, Err} -> {Plugins, [{Name, [Err]} | Errors]}; ok -> case check_plugins_versions(Plugins, PluginsVersions) of ok -> {[Plugin | Plugins], Errors}; diff --git a/src/rabbit_plugins_main.erl b/src/rabbit_plugins_main.erl index 6c4d35c58f6e..18cb9b8ce77f 100644 --- a/src/rabbit_plugins_main.erl +++ b/src/rabbit_plugins_main.erl @@ -172,11 +172,28 @@ validate_plugins(Names, #cli{all = All}) -> lists:keyfind(Name, #plugin.name, All) end, Deps), - {_, Errors} = rabbit_plugins:validate_plugins(DepsPlugins, "0.0.0"), + {_, Errors} = rabbit_plugins:validate_plugins(DepsPlugins), Errors. -fmt_invalid(Errors) -> - lists:flatten(io_lib:format("Problem reading some plugins: ~p~n", [Errors])). +fmt_invalid(InvalidPlugins) -> + lists:flatten(["Failed to enable some plugins: \r\n" + | [fmt_invalid_plugin(Plugin) || Plugin <- InvalidPlugins]]). + +fmt_invalid_plugin({Name, Errors}) -> + [io_lib:format(" ~p:~n", [Name]) + | [fmt_invalid_plugin_error(Err) || Err <- Errors]]. + +fmt_invalid_plugin_error({missing_dependency, Dep}) -> + io_lib:format(" Dependency is missing or invalid: ~p~n", [Dep]); +fmt_invalid_plugin_error({version_mismatch, {Version, Required}}) -> + io_lib:format(" Broker version is invalid." + " Current version: ~p Required: ~p~n", [Version, Required]); +fmt_invalid_plugin_error({{version_mismatch, {Version, Required}}, Name}) -> + io_lib:format(" ~p plugin version is invalid." + " Current version: ~p Required: ~p~n", + [Name, Version, Required]); +fmt_invalid_plugin_error(Err) -> + io_lib:format(" Unknown error ~p~n", [Err]). %% Pretty print a list of plugins. 
format_plugins(Node, Pattern, Opts, #cli{all = All, From 9c76b9efb565a29d7684f94b1e1dee6c5a9fc19e Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Mon, 4 Apr 2016 14:58:05 +0100 Subject: [PATCH 021/174] Remove start arguments log --- scripts/rabbitmq-service.bat | 2 -- 1 file changed, 2 deletions(-) diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index 843336267225..7225c91b3c29 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -253,8 +253,6 @@ set ERLANG_SERVICE_ARGUMENTS= ^ !RABBITMQ_DIST_ARG! ^ !STARVAR! -echo "!ERLANG_SERVICE_ARGUMENTS!" > "!RABBITMQ_CONFIG_FILE!.txt" - set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:\=\\! set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:"=\"! From 9b23813646904e149a33a848a36d9341547fb3dd Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Mon, 4 Apr 2016 17:19:00 +0100 Subject: [PATCH 022/174] Log invalid plugins during startup --- src/rabbit_plugins.erl | 46 +++++++++++++++++++++++++++---------- src/rabbit_plugins_main.erl | 26 ++++----------------- 2 files changed, 38 insertions(+), 34 deletions(-) diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index bb09be3370d4..054ec1cf91d8 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -22,9 +22,9 @@ -export([ensure/1]). -export([extract_schemas/1]). -export([version_support/2]). +-export([validate_plugins/1, format_invalid_plugins/1]). %%---------------------------------------------------------------------------- --compile(export_all). -ifdef(use_specs). -type(plugin_name() :: atom()). 
@@ -252,43 +252,65 @@ prepare_plugins(Enabled) -> Wanted = dependencies(false, Enabled, AllPlugins), WantedPlugins = lookup_plugins(Wanted, AllPlugins), {ValidPlugins, Problems} = validate_plugins(WantedPlugins), - %TODO: do not enable invalid plugins + %TODO: error message formatting + rabbit_log:warning(format_invalid_plugins(Problems)), case filelib:ensure_dir(ExpandDir ++ "/") of ok -> ok; {error, E2} -> throw({error, {cannot_create_plugins_expand_dir, [ExpandDir, E2]}}) end, - [prepare_plugin(Plugin, ExpandDir) || Plugin <- WantedPlugins], + [prepare_plugin(Plugin, ExpandDir) || Plugin <- ValidPlugins], [prepare_dir_plugin(PluginAppDescPath) || PluginAppDescPath <- filelib:wildcard(ExpandDir ++ "/*/ebin/*.app")], Wanted. -validate_plugins(WantedPlugins) -> +format_invalid_plugins(InvalidPlugins) -> + lists:flatten(["Failed to enable some plugins: \r\n" + | [format_invalid_plugin(Plugin) + || Plugin <- InvalidPlugins]]). + +format_invalid_plugin({Name, Errors}) -> + [io_lib:format(" ~p:~n", [Name]) + | [format_invalid_plugin_error(Err) || Err <- Errors]]. + +format_invalid_plugin_error({missing_dependency, Dep}) -> + io_lib:format(" Dependency is missing or invalid: ~p~n", [Dep]); +format_invalid_plugin_error({version_mismatch, {Version, Required}}) -> + io_lib:format(" Broker version is invalid." + " Current version: ~p Required: ~p~n", [Version, Required]); +format_invalid_plugin_error({{version_mismatch, {Version, Required}}, Name}) -> + io_lib:format(" ~p plugin version is invalid." + " Current version: ~p Required: ~p~n", + [Name, Version, Required]); +format_invalid_plugin_error(Err) -> + io_lib:format(" Unknown error ~p~n", [Err]). + +validate_plugins(Plugins) -> application:load(rabbit), RabbitVersion = RabbitVersion = case application:get_key(rabbit, vsn) of undefined -> "0.0.0"; {ok, Val} -> Val end, - validate_plugins(WantedPlugins, RabbitVersion). + validate_plugins(Plugins, RabbitVersion). 
-validate_plugins(WantedPlugins, RabbitVersion) -> +validate_plugins(Plugins, RabbitVersion) -> lists:foldl( fun(#plugin{name = Name, rabbitmq_versions = RabbitmqVersions, plugins_versions = PluginsVersions} = Plugin, - {Plugins, Errors}) -> + {Plugins0, Errors}) -> case version_support(RabbitVersion, RabbitmqVersions) of - {error, Err} -> {Plugins, [{Name, [Err]} | Errors]}; + {error, Err} -> {Plugins0, [{Name, [Err]} | Errors]}; ok -> - case check_plugins_versions(Plugins, PluginsVersions) of - ok -> {[Plugin | Plugins], Errors}; - {error, Err} -> {Plugins, [{Name, Err} | Errors]} + case check_plugins_versions(Plugins0, PluginsVersions) of + ok -> {[Plugin | Plugins0], Errors}; + {error, Err} -> {Plugins0, [{Name, Err} | Errors]} end end end, {[],[]}, - WantedPlugins). + Plugins). check_plugins_versions(AllPlugins, RequiredVersions) -> ExistingVersions = [{Name, Vsn} diff --git a/src/rabbit_plugins_main.erl b/src/rabbit_plugins_main.erl index 18cb9b8ce77f..d4df931f6499 100644 --- a/src/rabbit_plugins_main.erl +++ b/src/rabbit_plugins_main.erl @@ -102,7 +102,8 @@ action(enable, Node, ToEnable0, Opts, State = #cli{all = All, Invalid = validate_plugins(NewEnabled, State), case Invalid of [] -> ok; - _ -> throw({error_string, fmt_invalid(Invalid)}) + _ -> throw({error_string, + rabbit_plugins:format_invalid_plugins(Invalid)}) end, NewImplicit = write_enabled_plugins(NewEnabled, State), case NewEnabled -- Implicit of @@ -123,7 +124,8 @@ action(set, Node, NewEnabled0, Opts, State = #cli{all = All, Invalid = validate_plugins(NewEnabled, State), case Invalid of [] -> ok; - _ -> throw({error_string, fmt_invalid(Invalid)}) + _ -> throw({error_string, + rabbit_plugins:format_invalid_plugins(Invalid)}) end, NewImplicit = write_enabled_plugins(NewEnabled, State), case NewImplicit of @@ -175,26 +177,6 @@ validate_plugins(Names, #cli{all = All}) -> {_, Errors} = rabbit_plugins:validate_plugins(DepsPlugins), Errors. 
-fmt_invalid(InvalidPlugins) -> - lists:flatten(["Failed to enable some plugins: \r\n" - | [fmt_invalid_plugin(Plugin) || Plugin <- InvalidPlugins]]). - -fmt_invalid_plugin({Name, Errors}) -> - [io_lib:format(" ~p:~n", [Name]) - | [fmt_invalid_plugin_error(Err) || Err <- Errors]]. - -fmt_invalid_plugin_error({missing_dependency, Dep}) -> - io_lib:format(" Dependency is missing or invalid: ~p~n", [Dep]); -fmt_invalid_plugin_error({version_mismatch, {Version, Required}}) -> - io_lib:format(" Broker version is invalid." - " Current version: ~p Required: ~p~n", [Version, Required]); -fmt_invalid_plugin_error({{version_mismatch, {Version, Required}}, Name}) -> - io_lib:format(" ~p plugin version is invalid." - " Current version: ~p Required: ~p~n", - [Name, Version, Required]); -fmt_invalid_plugin_error(Err) -> - io_lib:format(" Unknown error ~p~n", [Err]). - %% Pretty print a list of plugins. format_plugins(Node, Pattern, Opts, #cli{all = All, enabled = Enabled, From c45b4c30e1836a6cbec6cd86a13143305eb14ba1 Mon Sep 17 00:00:00 2001 From: Alexey Lebedeff Date: Mon, 4 Apr 2016 19:33:25 +0300 Subject: [PATCH 023/174] Fix half-hearted attempt to erase mnesia in OCF RA ocf_run does `"$@"`, so "${MNESIA_FILES}/*" wasn't expanded and mnesia directory wasn't actually cleaned up Fuel bug: https://bugs.launchpad.net/fuel/+bug/1565868 --- scripts/rabbitmq-server-ha.ocf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/rabbitmq-server-ha.ocf b/scripts/rabbitmq-server-ha.ocf index ae7991833baa..5ead9a81ad7f 100755 --- a/scripts/rabbitmq-server-ha.ocf +++ b/scripts/rabbitmq-server-ha.ocf @@ -678,8 +678,8 @@ reset_mnesia() { # remove mnesia files, if required if $make_amnesia ; then kill_rmq_and_remove_pid - ocf_run rm -rf "${MNESIA_FILES}/*" - ocf_log warn "${LH} Mnesia files appear corrupted and have been removed." + ocf_run rm -rf "${MNESIA_FILES}" + ocf_log warn "${LH} Mnesia files appear corrupted and have been removed from ${MNESIA_FILES}." 
fi # always return OCF SUCCESS return $OCF_SUCCESS From ab9ebd59ab2d43030a84e3ed9d4550b8929ab522 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Tue, 5 Apr 2016 11:05:17 +0100 Subject: [PATCH 024/174] Remove redundant lines --- scripts/rabbitmq-service.bat | 4 ---- 1 file changed, 4 deletions(-) diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index 7225c91b3c29..8a77fa176917 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -124,8 +124,6 @@ if errorlevel 1 ( set RABBITMQ_EBIN_ROOT=!RABBITMQ_HOME!\ebin -set RABBITMQ_CONFIG_FILE="!RABBITMQ_CONFIG_FILE!" - CALL :get_noex !RABBITMQ_ADVANCED_CONFIG_FILE! RABBITMQ_ADVANCED_CONFIG_FILE_NOEX if "!RABBITMQ_ADVANCED_CONFIG_FILE!" == "!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX!.config" ( @@ -181,8 +179,6 @@ if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( ) else ( if exist "!RABBITMQ_CONFIG_FILE!.config" ( set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" - ) else if exist "!RABBITMQ_CONFIG_FILE!.config" ( - set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" 
) else ( rem Always specify generated config arguments, we cannot rem assume .conf file is available From 07bb8f2491cebfdb923d12e9e4ff819658187568 Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Tue, 5 Apr 2016 11:43:59 +0100 Subject: [PATCH 025/174] Update owner of mgmt_db tables --- src/rabbit_vm.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_vm.erl b/src/rabbit_vm.erl index d5f7328fec77..82effb4fc5e0 100644 --- a/src/rabbit_vm.erl +++ b/src/rabbit_vm.erl @@ -45,7 +45,7 @@ memory() -> Mnesia = mnesia_memory(), MsgIndexETS = ets_memory([msg_store_persistent, msg_store_transient]), - MgmtDbETS = ets_memory([rabbit_mgmt_db]), + MgmtDbETS = ets_memory([rabbit_mgmt_event_collector]), [{total, Total}, {processes, Processes}, From 811b0d9772808499fcee154d9a4060afe67a37e3 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Tue, 5 Apr 2016 13:58:36 +0100 Subject: [PATCH 026/174] Override CONF_ENV_FILE with RABBITMQ_CONF_ENV_FILE --- scripts/rabbitmq-defaults.bat | 4 +--- scripts/rabbitmq-env | 11 +++-------- scripts/rabbitmq-env.bat | 4 ++++ scripts/rabbitmq-server | 2 +- 4 files changed, 9 insertions(+), 12 deletions(-) diff --git a/scripts/rabbitmq-defaults.bat b/scripts/rabbitmq-defaults.bat index 27edd0d11eab..8fff5ea827a7 100644 --- a/scripts/rabbitmq-defaults.bat +++ b/scripts/rabbitmq-defaults.bat @@ -46,6 +46,4 @@ REM PLUGINS_DIR="${RABBITMQ_HOME}/plugins" for /f "delims=" %%F in ("!TDP0!..\plugins") do set PLUGINS_DIR=%%~dpsF%%~nF%%~xF REM CONF_ENV_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq-env.conf -if "!RABBITMQ_CONF_ENV_FILE!"=="" ( - set RABBITMQ_CONF_ENV_FILE=!RABBITMQ_BASE!\rabbitmq-env-conf.bat -) +set CONF_ENV_FILE=!RABBITMQ_BASE!\rabbitmq-env-conf.bat diff --git a/scripts/rabbitmq-env b/scripts/rabbitmq-env index dffed035ea72..35239620cab2 100644 --- a/scripts/rabbitmq-env +++ b/scripts/rabbitmq-env @@ -65,20 +65,15 @@ RABBITMQ_HOME="$(rmq_realpath "${RABBITMQ_SCRIPTS_DIR}/..")" ## Common defaults SERVER_ERL_ARGS="+P 
1048576" -# warn about old rabbitmq.conf file, if no new one -if [ -f /etc/rabbitmq/rabbitmq.conf ] && \ - [ ! -f ${CONF_ENV_FILE} ] ; then - echo -n "WARNING: ignoring /etc/rabbitmq/rabbitmq.conf -- " - echo "location has moved to ${CONF_ENV_FILE}" -fi - # We save the current value of $RABBITMQ_PID_FILE in case it was set by # an init script. If $CONF_ENV_FILE overrides it again, we must ignore # it and warn the user. saved_RABBITMQ_PID_FILE=$RABBITMQ_PID_FILE ## Get configuration variables from the configure environment file -[ -f ${CONF_ENV_FILE} ] && . ${CONF_ENV_FILE} || true +[ "x" = "x$RABBITMQ_CONF_ENV_FILE" ] && RABBITMQ_CONF_ENV_FILE=${CONF_ENV_FILE} + +[ -f ${RABBITMQ_CONF_ENV_FILE} ] && . ${RABBITMQ_CONF_ENV_FILE} || true if [ "$saved_RABBITMQ_PID_FILE" -a \ "$saved_RABBITMQ_PID_FILE" != "$RABBITMQ_PID_FILE" ]; then diff --git a/scripts/rabbitmq-env.bat b/scripts/rabbitmq-env.bat index d5df9ddbd603..ea9cf4596226 100644 --- a/scripts/rabbitmq-env.bat +++ b/scripts/rabbitmq-env.bat @@ -47,6 +47,10 @@ REM set SERVER_ERL_ARGS=+P 1048576 REM ## Get configuration variables from the configure environment file REM [ -f ${CONF_ENV_FILE} ] && . ${CONF_ENV_FILE} || true +if "!RABBITMQ_CONF_ENV_FILE!"=="" ( + set RABBITMQ_CONF_ENV_FILE=!CONF_ENV_FILE! +) + if exist "!RABBITMQ_CONF_ENV_FILE!" ( call "!RABBITMQ_CONF_ENV_FILE!" ) diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server index 8ed7319b41ef..ab2975feb1fd 100755 --- a/scripts/rabbitmq-server +++ b/scripts/rabbitmq-server @@ -187,7 +187,7 @@ check_not_empty() { eval value=\$$name if [ -z "$value" ]; then echo "Error: ENV variable should be defined: $1. 
- Please check rabbitmq-env, rabbitmq-defaults, and $CONF_ENV_FILE script files" + Please check rabbitmq-env, rabbitmq-defaults, and ${RABBITMQ_CONF_ENV_FILE} script files" exit 78 fi } From 6d443bbae5ce570e3fdfd51088c0b7bbeef3c9ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Hoguin?= Date: Tue, 5 Apr 2016 16:28:49 +0200 Subject: [PATCH 027/174] Add upgrade function adding listener socket options --- src/rabbit_upgrade_functions.erl | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index f9ed62b4b261..b99a1d12ee27 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -24,6 +24,7 @@ -rabbit_upgrade({remove_user_scope, mnesia, []}). -rabbit_upgrade({hash_passwords, mnesia, []}). -rabbit_upgrade({add_ip_to_listener, mnesia, []}). +-rabbit_upgrade({add_opts_to_listener, mnesia, [add_ip_to_listener]}). -rabbit_upgrade({internal_exchanges, mnesia, []}). -rabbit_upgrade({user_to_internal_user, mnesia, [hash_passwords]}). -rabbit_upgrade({topic_trie, mnesia, []}). @@ -60,6 +61,7 @@ -spec(remove_user_scope/0 :: () -> 'ok'). -spec(hash_passwords/0 :: () -> 'ok'). -spec(add_ip_to_listener/0 :: () -> 'ok'). +-spec(add_opts_to_listener/0 :: () -> 'ok'). -spec(internal_exchanges/0 :: () -> 'ok'). -spec(user_to_internal_user/0 :: () -> 'ok'). -spec(topic_trie/0 :: () -> 'ok'). @@ -126,6 +128,14 @@ add_ip_to_listener() -> end, [node, protocol, host, ip_address, port]). +add_opts_to_listener() -> + transform( + rabbit_listener, + fun ({listener, Node, Protocol, Host, IP, Port}) -> + {listener, Node, Protocol, Host, IP, Port, []} + end, + [node, protocol, host, ip_address, port, opts]). 
+ internal_exchanges() -> Tables = [rabbit_exchange, rabbit_durable_exchange], AddInternalFun = From 20a2d289c6e3f227a681d0886c09b30ebe14a4d8 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Tue, 5 Apr 2016 17:46:30 +0100 Subject: [PATCH 028/174] Test exports --- src/rabbit_plugins.erl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index 054ec1cf91d8..6d7c3b8f35da 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -21,9 +21,11 @@ -export([setup/0, active/0, read_enabled/1, list/1, list/2, dependencies/3]). -export([ensure/1]). -export([extract_schemas/1]). --export([version_support/2]). -export([validate_plugins/1, format_invalid_plugins/1]). +% Export for testing purpose. +-export([version_support/2, validate_plugins/2]). + %%---------------------------------------------------------------------------- -ifdef(use_specs). From b3eb6fd3809a8fc74601d44997eac2bc007194b1 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Fri, 18 Mar 2016 16:26:04 +0000 Subject: [PATCH 029/174] Support extensions in config files --- scripts/rabbitmq-env.bat | 8 ++++++- scripts/rabbitmq-server | 28 +++++++++++++++++++---- scripts/rabbitmq-server.bat | 38 +++++++++++++++++++++++-------- scripts/rabbitmq-service.bat | 37 +++++++++++++++++++++++++----- src/rabbit_prelaunch.erl | 44 ++++++++++++++++++++---------------- 5 files changed, 115 insertions(+), 40 deletions(-) diff --git a/scripts/rabbitmq-env.bat b/scripts/rabbitmq-env.bat index fb290b6075e6..19b20a468077 100644 --- a/scripts/rabbitmq-env.bat +++ b/scripts/rabbitmq-env.bat @@ -81,7 +81,7 @@ if "!RABBITMQ_NODENAME!"=="" ( if "!NODENAME!"=="" ( REM We use Erlang to query the local hostname because REM !COMPUTERNAME! and Erlang may return different results. - REM Start erl with -sname to make sure epmd is started. + REM Start erl with -sname to make sure epmd is started. 
call "%ERLANG_HOME%\bin\erl.exe" -A0 -noinput -boot start_clean -sname rabbit-prelaunch-epmd -eval "init:stop()." >nul 2>&1 for /f "delims=" %%F in ('call "%ERLANG_HOME%\bin\erl.exe" -A0 -noinput -boot start_clean -eval "net_kernel:start([list_to_atom(""rabbit-gethostname-"" ++ os:getpid()), %NAMETYPE%]), [_, H] = string:tokens(atom_to_list(node()), ""@""), io:format(""~s~n"", [H]), init:stop()."') do @set HOSTNAME=%%F set RABBITMQ_NODENAME=rabbit@!HOSTNAME! @@ -154,6 +154,8 @@ REM [ "x" = "x$RABBITMQ_SERVER_ERL_ARGS" ] && RABBITMQ_SERVER_ERL_ARGS=${SERVER_ REM No Windows equivalent REM [ "x" = "x$RABBITMQ_CONFIG_FILE" ] && RABBITMQ_CONFIG_FILE=${CONFIG_FILE} + +CALL :unquote RABBITMQ_CONFIG_FILE %RABBITMQ_CONFIG_FILE% if "!RABBITMQ_CONFIG_FILE!"=="" ( if "!CONFIG_FILE!"=="" ( set RABBITMQ_CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq @@ -421,3 +423,7 @@ REM ##--- End of overridden variables REM REM # Since we source this elsewhere, don't accidentally stop execution REM true + +:unquote +set %1=%~2 +EXIT /B 0 diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server index d3ad47a912d4..61d0768f6c64 100755 --- a/scripts/rabbitmq-server +++ b/scripts/rabbitmq-server @@ -95,10 +95,15 @@ fi set -e -if [ -f "${RABBITMQ_CONFIG_FILE}.config" ]; then - RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE}" -elif [ -f "${RABBITMQ_CONFIG_FILE}.conf" ]; then - RABBITMQ_CONFIG_ARG="-conf ${RABBITMQ_CONFIG_FILE} \ +RABBITMQ_CONFIG_FILE_NOEX="${RABBITMQ_CONFIG_FILE%.*}" +echo "NOEX: ${RABBITMQ_CONFIG_FILE_NOEX}" + +if [ "${RABBITMQ_CONFIG_FILE_NOEX}.config" == "${RABBITMQ_CONFIG_FILE}" ]; then + if [ -f "${RABBITMQ_CONFIG_FILE}" ]; then + RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE_NOEX}" + fi +elif [ "${RABBITMQ_CONFIG_FILE_NOEX}.conf" == "${RABBITMQ_CONFIG_FILE}" ]; then + RABBITMQ_CONFIG_ARG="-conf ${RABBITMQ_CONFIG_FILE_NOEX} \ -conf_dir ${RABBITMQ_GENERATED_CONFIG_DIR} \ -conf_script_dir `dirname $0` \ -conf_schema_dir ${RABBITMQ_SCHEMA_DIR}" @@ -107,6 +112,21 @@ elif [ 
-f "${RABBITMQ_CONFIG_FILE}.conf" ]; then -conf_advanced ${RABBITMQ_ADVANCED_CONFIG_FILE} \ -config ${RABBITMQ_ADVANCED_CONFIG_FILE}" fi +elif [ "${RABBITMQ_CONFIG_FILE_NOEX}" == "${RABBITMQ_CONFIG_FILE}" ]; then + if [ -f "${RABBITMQ_CONFIG_FILE}.config" ]; then + RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE_NOEX}" + elif [ -f "${RABBITMQ_CONFIG_FILE}.conf" ]; then + echo "NOEX CONF" + RABBITMQ_CONFIG_ARG="-conf ${RABBITMQ_CONFIG_FILE_NOEX} \ + -conf_dir ${RABBITMQ_GENERATED_CONFIG_DIR} \ + -conf_script_dir `dirname $0` \ + -conf_schema_dir ${RABBITMQ_SCHEMA_DIR}" + if [ -f "${RABBITMQ_ADVANCED_CONFIG_FILE}.config" ]; then + RABBITMQ_CONFIG_ARG="${RABBITMQ_CONFIG_ARG} \ + -conf_advanced ${RABBITMQ_ADVANCED_CONFIG_FILE} \ + -config ${RABBITMQ_ADVANCED_CONFIG_FILE}" + fi + fi fi RABBITMQ_LISTEN_ARG= diff --git a/scripts/rabbitmq-server.bat b/scripts/rabbitmq-server.bat index 2a38e77a82e9..034abd472194 100644 --- a/scripts/rabbitmq-server.bat +++ b/scripts/rabbitmq-server.bat @@ -42,8 +42,6 @@ if not exist "!ERLANG_HOME!\bin\erl.exe" ( set RABBITMQ_EBIN_ROOT=!RABBITMQ_HOME!\ebin -set RABBITMQ_CONFIG_FILE="!RABBITMQ_CONFIG_FILE!" - "!ERLANG_HOME!\bin\erl.exe" ^ -pa "!RABBITMQ_EBIN_ROOT!" ^ -noinput -hidden ^ @@ -68,10 +66,28 @@ if not exist "!RABBITMQ_SCHEMA_DIR!\rabbitmq.schema" ( set RABBITMQ_EBIN_PATH="-pa !RABBITMQ_EBIN_ROOT!" -if exist "!RABBITMQ_CONFIG_FILE!.config" ( - set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" -) else if exist "!RABBITMQ_CONFIG_FILE!.conf" ( - set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE!" ^ +CALL :get_noex !RABBITMQ_CONFIG_FILE! + +if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!" ( + if exist "!RABBITMQ_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE_NOEX!" + ) else if exist "!RABBITMQ_CONFIG_FILE!.conf" ( + set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE_NOEX!" ^ + -conf_dir !RABBITMQ_GENERATED_CONFIG_DIR! ^ + -conf_script_dir !CONF_SCRIPT_DIR:\=/! 
^ + -conf_schema_dir !RABBITMQ_SCHEMA_DIR! + if exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=!RABBITMQ_CONFIG_ARG! ^ + -conf_advanced "!RABBITMQ_ADVANCED_CONFIG_FILE!" ^ + -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" + ) + ) +) else if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( + if exist "!RABBITMQ_CONFIG_FILE!" ( + set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE_NOEX!" + ) +) else if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.conf" ( + set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE_NOEX!" ^ -conf_dir !RABBITMQ_GENERATED_CONFIG_DIR! ^ -conf_script_dir !CONF_SCRIPT_DIR:\=/! ^ -conf_schema_dir !RABBITMQ_SCHEMA_DIR! @@ -80,7 +96,7 @@ if exist "!RABBITMQ_CONFIG_FILE!.config" ( -conf_advanced "!RABBITMQ_ADVANCED_CONFIG_FILE!" ^ -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" ) -) +) set RABBITMQ_LISTEN_ARG= if not "!RABBITMQ_NODE_IP_ADDRESS!"=="" ( @@ -114,7 +130,7 @@ if "!RABBITMQ_IO_THREAD_POOL_SIZE!"=="" ( set ENV_OK=true -CALL :check_not_empty "RABBITMQ_BOOT_MODULE" !RABBITMQ_BOOT_MODULE! +CALL :check_not_empty "RABBITMQ_BOOT_MODULE" !RABBITMQ_BOOT_MODULE! CALL :check_not_empty "RABBITMQ_NAME_TYPE" !RABBITMQ_NAME_TYPE! CALL :check_not_empty "RABBITMQ_NODENAME" !RABBITMQ_NODENAME! @@ -157,10 +173,14 @@ EXIT /B 0 if "%~2"=="" ( ECHO "Error: ENV variable should be defined: %1. Please check rabbitmq-env and rabbitmq-defaults, and !RABBITMQ_CONF_ENV_FILE! script files. Check also your Environment Variables settings" set ENV_OK=false - EXIT /B 78 + EXIT /B 78 ) EXIT /B 0 +:get_noex +set RABBITMQ_CONFIG_FILE_NOEX=%~dpn1 +EXIT /B 0 + endlocal endlocal diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index 7e80e78398dc..58e92eb36dd4 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -157,12 +157,32 @@ if not exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( echo []. 
> !RABBITMQ_ADVANCED_CONFIG_FILE!.config ) -if exist "!RABBITMQ_CONFIG_FILE!.config" ( - set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" -) else ( - rem Always specify generated config arguments, we cannot - rem assume .conf file is available - set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE!" ^ +CALL :get_noex !RABBITMQ_CONFIG_FILE! + +if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!" ( + if exist "!RABBITMQ_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE_NOEX!" + ) else if exist "!RABBITMQ_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" + ) else ( + rem Always specify generated config arguments, we cannot + rem assume .conf file is available + set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE!" ^ + -conf_dir !RABBITMQ_GENERATED_CONFIG_DIR! ^ + -conf_script_dir !CONF_SCRIPT_DIR:\=/! ^ + -conf_schema_dir !RABBITMQ_SCHEMA_DIR! + if exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=!RABBITMQ_CONFIG_ARG! ^ + -conf_advanced "!RABBITMQ_ADVANCED_CONFIG_FILE!" ^ + -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" + ) + ) +) else if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( + if exist "!RABBITMQ_CONFIG_FILE!" ( + set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE_NOEX!" + ) +) else if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.conf" ( + set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE_NOEX!" ^ -conf_dir !RABBITMQ_GENERATED_CONFIG_DIR! ^ -conf_script_dir !CONF_SCRIPT_DIR:\=/! ^ -conf_schema_dir !RABBITMQ_SCHEMA_DIR! 
@@ -173,6 +193,7 @@ if exist "!RABBITMQ_CONFIG_FILE!.config" ( ) ) + set RABBITMQ_LISTEN_ARG= if not "!RABBITMQ_NODE_IP_ADDRESS!"=="" ( if not "!RABBITMQ_NODE_PORT!"=="" ( @@ -267,5 +288,9 @@ if "%~2"=="" ( ) EXIT /B 0 +:get_noex +set RABBITMQ_CONFIG_FILE_NOEX=%~dpn1 +EXIT /B 0 + endlocal endlocal diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 3f83a153eaae..e87266320028 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -79,35 +79,39 @@ duplicate_node_check(NodeName, NodeHost) -> end. dist_port_set_check() -> - case os:getenv("RABBITMQ_CONFIG_FILE") of - false -> + case get_config(os:getenv("RABBITMQ_CONFIG_FILE")) of + {ok, [Config]} -> + Kernel = pget(kernel, Config, []), + case {pget(inet_dist_listen_min, Kernel, none), + pget(inet_dist_listen_max, Kernel, none)} of + {none, none} -> ok; + _ -> rabbit_misc:quit(?DO_NOT_SET_DIST_PORT) + end; + {ok, _} -> ok; - File -> - case get_config(File) of - {ok, [Config]} -> - Kernel = pget(kernel, Config, []), - case {pget(inet_dist_listen_min, Kernel, none), - pget(inet_dist_listen_max, Kernel, none)} of - {none, none} -> ok; - _ -> rabbit_misc:quit(?DO_NOT_SET_DIST_PORT) - end; - {ok, _} -> - ok; - {error, _} -> - ok - end + {error, _} -> + ok end. -get_config(File) -> - case rabbit_file:is_file(File ++ ".config") of - true -> file:consult(File ++ ".config"); - false -> +get_config(File) -> + case consult_file(File) of + {ok, Contents} -> {ok, Contents}; + {error, _} -> case rabbit_config:get_advanced_config() of none -> {error, enoent}; FileName -> file:consult(FileName) end end. +consult_file(false) -> {error, nofile}; +consult_file(File) -> + FileName = case filename:extension(File) of + "" -> File ++ ".config"; + ".config" -> File; + _ -> "" + end, + file:consult(FileName). 
+ dist_port_range_check() -> case os:getenv("RABBITMQ_DIST_PORT") of false -> ok; From db4d5426b65763dd05fdb02d49336dd14d4c7cbc Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Tue, 29 Mar 2016 14:15:44 +0100 Subject: [PATCH 030/174] Remove debug echoes --- scripts/rabbitmq-server | 2 -- 1 file changed, 2 deletions(-) diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server index 61d0768f6c64..0638dd9af33a 100755 --- a/scripts/rabbitmq-server +++ b/scripts/rabbitmq-server @@ -96,7 +96,6 @@ fi set -e RABBITMQ_CONFIG_FILE_NOEX="${RABBITMQ_CONFIG_FILE%.*}" -echo "NOEX: ${RABBITMQ_CONFIG_FILE_NOEX}" if [ "${RABBITMQ_CONFIG_FILE_NOEX}.config" == "${RABBITMQ_CONFIG_FILE}" ]; then if [ -f "${RABBITMQ_CONFIG_FILE}" ]; then @@ -116,7 +115,6 @@ elif [ "${RABBITMQ_CONFIG_FILE_NOEX}" == "${RABBITMQ_CONFIG_FILE}" ]; then if [ -f "${RABBITMQ_CONFIG_FILE}.config" ]; then RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE_NOEX}" elif [ -f "${RABBITMQ_CONFIG_FILE}.conf" ]; then - echo "NOEX CONF" RABBITMQ_CONFIG_ARG="-conf ${RABBITMQ_CONFIG_FILE_NOEX} \ -conf_dir ${RABBITMQ_GENERATED_CONFIG_DIR} \ -conf_script_dir `dirname $0` \ From e3b856330562951fff07767ba82ccd03b13a19a4 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Thu, 31 Mar 2016 02:51:02 +0300 Subject: [PATCH 031/174] Strip off trailing .conf/.config using dirname and basename Otherwise if there is no dot in RABBITMQ_CONFIG_FILE, RABBITMQ_CONFIG_FILE_NOEX ends up being empty. 
--- scripts/rabbitmq-server | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server index 0638dd9af33a..65bca88844a1 100755 --- a/scripts/rabbitmq-server +++ b/scripts/rabbitmq-server @@ -95,7 +95,15 @@ fi set -e -RABBITMQ_CONFIG_FILE_NOEX="${RABBITMQ_CONFIG_FILE%.*}" +strip_trailing_config_or_conf() { + local dir=$(dirname $1) + local filename_without_conf=$(basename $1 .conf) + local filename_without_config=$(basename $filename_without_conf .config) + + echo "$dir/$filename_without_config" +} + +RABBITMQ_CONFIG_FILE_NOEX=$(strip_trailing_config_or_conf $RABBITMQ_CONFIG_FILE) if [ "${RABBITMQ_CONFIG_FILE_NOEX}.config" == "${RABBITMQ_CONFIG_FILE}" ]; then if [ -f "${RABBITMQ_CONFIG_FILE}" ]; then From 06fa298aacb9826a68224c123abb24c2cdabad22 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Thu, 31 Mar 2016 10:13:26 +0100 Subject: [PATCH 032/174] Condition ordering to set RABBITMQ_CONFIG_ARG --- scripts/rabbitmq-server | 8 ++++---- scripts/rabbitmq-server.bat | 32 ++++++++++++++++---------------- scripts/rabbitmq-service.bat | 33 +++++++++++++++++---------------- 3 files changed, 37 insertions(+), 36 deletions(-) diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server index 65bca88844a1..62fb7b05fd56 100755 --- a/scripts/rabbitmq-server +++ b/scripts/rabbitmq-server @@ -77,7 +77,7 @@ RABBITMQ_DIST_PORT=$RABBITMQ_DIST_PORT \ -rabbit enabled_plugins_file "\"$RABBITMQ_ENABLED_PLUGINS_FILE\"" \ -rabbit plugins_dir "\"$RABBITMQ_PLUGINS_DIR\"" \ -extra "${RABBITMQ_NODENAME}" - + PRELAUNCH_RESULT=$? 
if [ ${PRELAUNCH_RESULT} = 2 ] ; then # dist port is mentioned in config, so do not set it @@ -119,11 +119,11 @@ elif [ "${RABBITMQ_CONFIG_FILE_NOEX}.conf" == "${RABBITMQ_CONFIG_FILE}" ]; then -conf_advanced ${RABBITMQ_ADVANCED_CONFIG_FILE} \ -config ${RABBITMQ_ADVANCED_CONFIG_FILE}" fi -elif [ "${RABBITMQ_CONFIG_FILE_NOEX}" == "${RABBITMQ_CONFIG_FILE}" ]; then +else if [ -f "${RABBITMQ_CONFIG_FILE}.config" ]; then - RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE_NOEX}" + RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE}" elif [ -f "${RABBITMQ_CONFIG_FILE}.conf" ]; then - RABBITMQ_CONFIG_ARG="-conf ${RABBITMQ_CONFIG_FILE_NOEX} \ + RABBITMQ_CONFIG_ARG="-conf ${RABBITMQ_CONFIG_FILE} \ -conf_dir ${RABBITMQ_GENERATED_CONFIG_DIR} \ -conf_script_dir `dirname $0` \ -conf_schema_dir ${RABBITMQ_SCHEMA_DIR}" diff --git a/scripts/rabbitmq-server.bat b/scripts/rabbitmq-server.bat index 034abd472194..cae876f1d2ac 100644 --- a/scripts/rabbitmq-server.bat +++ b/scripts/rabbitmq-server.bat @@ -68,21 +68,7 @@ set RABBITMQ_EBIN_PATH="-pa !RABBITMQ_EBIN_ROOT!" CALL :get_noex !RABBITMQ_CONFIG_FILE! -if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!" ( - if exist "!RABBITMQ_CONFIG_FILE!.config" ( - set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE_NOEX!" - ) else if exist "!RABBITMQ_CONFIG_FILE!.conf" ( - set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE_NOEX!" ^ - -conf_dir !RABBITMQ_GENERATED_CONFIG_DIR! ^ - -conf_script_dir !CONF_SCRIPT_DIR:\=/! ^ - -conf_schema_dir !RABBITMQ_SCHEMA_DIR! - if exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( - set RABBITMQ_CONFIG_ARG=!RABBITMQ_CONFIG_ARG! ^ - -conf_advanced "!RABBITMQ_ADVANCED_CONFIG_FILE!" ^ - -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" - ) - ) -) else if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( +if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( if exist "!RABBITMQ_CONFIG_FILE!" ( set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE_NOEX!" 
) @@ -96,6 +82,20 @@ if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!" ( -conf_advanced "!RABBITMQ_ADVANCED_CONFIG_FILE!" ^ -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" ) +) else ( + if exist "!RABBITMQ_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" + ) else if exist "!RABBITMQ_CONFIG_FILE!.conf" ( + set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE!" ^ + -conf_dir !RABBITMQ_GENERATED_CONFIG_DIR! ^ + -conf_script_dir !CONF_SCRIPT_DIR:\=/! ^ + -conf_schema_dir !RABBITMQ_SCHEMA_DIR! + if exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=!RABBITMQ_CONFIG_ARG! ^ + -conf_advanced "!RABBITMQ_ADVANCED_CONFIG_FILE!" ^ + -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" + ) + ) ) set RABBITMQ_LISTEN_ARG= @@ -126,7 +126,7 @@ if "!RABBITMQ_NODE_ONLY!"=="" ( if "!RABBITMQ_IO_THREAD_POOL_SIZE!"=="" ( set RABBITMQ_IO_THREAD_POOL_SIZE=64 -) +) set ENV_OK=true diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index 58e92eb36dd4..e635cb689679 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -159,9 +159,24 @@ if not exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( CALL :get_noex !RABBITMQ_CONFIG_FILE! -if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!" ( - if exist "!RABBITMQ_CONFIG_FILE!.config" ( + +if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( + if exist "!RABBITMQ_CONFIG_FILE!" ( set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE_NOEX!" + ) +) else if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.conf" ( + set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE_NOEX!" ^ + -conf_dir !RABBITMQ_GENERATED_CONFIG_DIR! ^ + -conf_script_dir !CONF_SCRIPT_DIR:\=/! ^ + -conf_schema_dir !RABBITMQ_SCHEMA_DIR! + if exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=!RABBITMQ_CONFIG_ARG! ^ + -conf_advanced "!RABBITMQ_ADVANCED_CONFIG_FILE!" ^ + -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" 
+ ) +) else ( + if exist "!RABBITMQ_CONFIG_FILE!.config" ( + set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" ) else if exist "!RABBITMQ_CONFIG_FILE!.config" ( set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" ) else ( @@ -177,20 +192,6 @@ if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!" ( -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" ) ) -) else if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( - if exist "!RABBITMQ_CONFIG_FILE!" ( - set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE_NOEX!" - ) -) else if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.conf" ( - set RABBITMQ_CONFIG_ARG=-conf "!RABBITMQ_CONFIG_FILE_NOEX!" ^ - -conf_dir !RABBITMQ_GENERATED_CONFIG_DIR! ^ - -conf_script_dir !CONF_SCRIPT_DIR:\=/! ^ - -conf_schema_dir !RABBITMQ_SCHEMA_DIR! - if exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( - set RABBITMQ_CONFIG_ARG=!RABBITMQ_CONFIG_ARG! ^ - -conf_advanced "!RABBITMQ_ADVANCED_CONFIG_FILE!" ^ - -config "!RABBITMQ_ADVANCED_CONFIG_FILE!" 
- ) ) From 6a5f0266071ad5a47f51bad8da5b0f498ea9a65f Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Thu, 31 Mar 2016 11:08:26 +0100 Subject: [PATCH 033/174] Equality operators and trimming extension --- scripts/rabbitmq-server | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server index 62fb7b05fd56..465ee1a53e3a 100755 --- a/scripts/rabbitmq-server +++ b/scripts/rabbitmq-server @@ -95,21 +95,13 @@ fi set -e -strip_trailing_config_or_conf() { - local dir=$(dirname $1) - local filename_without_conf=$(basename $1 .conf) - local filename_without_config=$(basename $filename_without_conf .config) +RABBITMQ_CONFIG_FILE_NOEX="${RABBITMQ_CONFIG_FILE%.*}" - echo "$dir/$filename_without_config" -} - -RABBITMQ_CONFIG_FILE_NOEX=$(strip_trailing_config_or_conf $RABBITMQ_CONFIG_FILE) - -if [ "${RABBITMQ_CONFIG_FILE_NOEX}.config" == "${RABBITMQ_CONFIG_FILE}" ]; then +if [ "${RABBITMQ_CONFIG_FILE_NOEX}.config" = "${RABBITMQ_CONFIG_FILE}" ]; then if [ -f "${RABBITMQ_CONFIG_FILE}" ]; then RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE_NOEX}" fi -elif [ "${RABBITMQ_CONFIG_FILE_NOEX}.conf" == "${RABBITMQ_CONFIG_FILE}" ]; then +elif [ "${RABBITMQ_CONFIG_FILE_NOEX}.conf" = "${RABBITMQ_CONFIG_FILE}" ]; then RABBITMQ_CONFIG_ARG="-conf ${RABBITMQ_CONFIG_FILE_NOEX} \ -conf_dir ${RABBITMQ_GENERATED_CONFIG_DIR} \ -conf_script_dir `dirname $0` \ From 6074ef205fc8def000296afebdd258febb56e041 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Fri, 1 Apr 2016 18:39:49 +0100 Subject: [PATCH 034/174] Support extension in advanced.config --- scripts/rabbitmq-server | 5 +++++ scripts/rabbitmq-server.bat | 9 +++++++-- scripts/rabbitmq-service.bat | 9 ++++++--- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server index 465ee1a53e3a..507a3ebfd9e0 100755 --- a/scripts/rabbitmq-server +++ b/scripts/rabbitmq-server @@ -62,6 +62,11 @@ 
RABBITMQ_EBIN_ROOT="${RABBITMQ_HOME}/ebin" set +e +RABBITMQ_ADVANCED_CONFIG_FILE_NOEX="${RABBITMQ_ADVANCED_CONFIG_FILE%.*}" +if [ "${RABBITMQ_ADVANCED_CONFIG_FILE_NOEX}.config" = "${RABBITMQ_ADVANCED_CONFIG_FILE}" ]; then + RABBITMQ_ADVANCED_CONFIG_FILE="${RABBITMQ_ADVANCED_CONFIG_FILE_NOEX}" +fi + # NOTIFY_SOCKET is needed here to prevent epmd from impersonating the # success of our startup sequence to systemd. NOTIFY_SOCKET= \ diff --git a/scripts/rabbitmq-server.bat b/scripts/rabbitmq-server.bat index cae876f1d2ac..8718197471d7 100644 --- a/scripts/rabbitmq-server.bat +++ b/scripts/rabbitmq-server.bat @@ -42,6 +42,11 @@ if not exist "!ERLANG_HOME!\bin\erl.exe" ( set RABBITMQ_EBIN_ROOT=!RABBITMQ_HOME!\ebin +for %%NOEX in (CALL :get_noex !RABBITMQ_ADVANCED_CONFIG_FILE!) do RABBITMQ_ADVANCED_CONFIG_FILE_NOEX=%%NOEX +if "!RABBITMQ_ADVANCED_CONFIG_FILE!" == "!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX!.config" ( + RABBITMQ_ADVANCED_CONFIG_FILE=!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX! +) + "!ERLANG_HOME!\bin\erl.exe" ^ -pa "!RABBITMQ_EBIN_ROOT!" ^ -noinput -hidden ^ @@ -66,7 +71,7 @@ if not exist "!RABBITMQ_SCHEMA_DIR!\rabbitmq.schema" ( set RABBITMQ_EBIN_PATH="-pa !RABBITMQ_EBIN_ROOT!" -CALL :get_noex !RABBITMQ_CONFIG_FILE! +for %%NOEX in (CALL :get_noex !RABBITMQ_CONFIG_FILE!) do RABBITMQ_CONFIG_FILE_NOEX=%%NOEX if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( if exist "!RABBITMQ_CONFIG_FILE!" ( @@ -178,7 +183,7 @@ if "%~2"=="" ( EXIT /B 0 :get_noex -set RABBITMQ_CONFIG_FILE_NOEX=%~dpn1 +echo %~dpn1 EXIT /B 0 endlocal diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index e635cb689679..ed1829c0d621 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -126,6 +126,10 @@ set RABBITMQ_EBIN_ROOT=!RABBITMQ_HOME!\ebin set RABBITMQ_CONFIG_FILE="!RABBITMQ_CONFIG_FILE!" +for %%NOEX in (CALL :get_noex !RABBITMQ_ADVANCED_CONFIG_FILE!) do RABBITMQ_ADVANCED_CONFIG_FILE_NOEX=%%NOEX +if "!RABBITMQ_ADVANCED_CONFIG_FILE!" 
== "!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX!.config" ( + RABBITMQ_ADVANCED_CONFIG_FILE=!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX! +) "!ERLANG_HOME!\bin\erl.exe" ^ -pa "!RABBITMQ_EBIN_ROOT!" ^ @@ -157,8 +161,7 @@ if not exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( echo []. > !RABBITMQ_ADVANCED_CONFIG_FILE!.config ) -CALL :get_noex !RABBITMQ_CONFIG_FILE! - +for %%NOEX in (CALL :get_noex !RABBITMQ_CONFIG_FILE!) do RABBITMQ_CONFIG_FILE_NOEX=%%NOEX if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( if exist "!RABBITMQ_CONFIG_FILE!" ( @@ -290,7 +293,7 @@ if "%~2"=="" ( EXIT /B 0 :get_noex -set RABBITMQ_CONFIG_FILE_NOEX=%~dpn1 +echo %~dpn1 EXIT /B 0 endlocal From 6c27b6f2770b5be3c7253f705c8c958317371c1e Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Mon, 4 Apr 2016 11:46:37 +0100 Subject: [PATCH 035/174] Unquoting and extension trimming in windows scripts --- scripts/rabbitmq-env.bat | 3 ++- scripts/rabbitmq-server.bat | 8 ++++---- scripts/rabbitmq-service.bat | 9 +++++---- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/scripts/rabbitmq-env.bat b/scripts/rabbitmq-env.bat index 19b20a468077..71752c667f9a 100644 --- a/scripts/rabbitmq-env.bat +++ b/scripts/rabbitmq-env.bat @@ -172,11 +172,12 @@ if "!RABBITMQ_GENERATED_CONFIG_DIR!"=="" ( ) ) +CALL :unquote RABBITMQ_ADVANCED_CONFIG_FILE %RABBITMQ_ADVANCED_CONFIG_FILE% if "!RABBITMQ_ADVANCED_CONFIG_FILE!"=="" ( if "!ADVANCED_CONFIG_FILE!"=="" ( set RABBITMQ_ADVANCED_CONFIG_FILE=!RABBITMQ_BASE!\advanced ) else ( - set RABBITMQ_ADVANCED_CONFIG_FILE=!GENERATED_CONFIG_DIR! + set RABBITMQ_ADVANCED_CONFIG_FILE=!ADVANCED_CONFIG_FILE! ) ) diff --git a/scripts/rabbitmq-server.bat b/scripts/rabbitmq-server.bat index 8718197471d7..d87dc9d5fbde 100644 --- a/scripts/rabbitmq-server.bat +++ b/scripts/rabbitmq-server.bat @@ -42,9 +42,9 @@ if not exist "!ERLANG_HOME!\bin\erl.exe" ( set RABBITMQ_EBIN_ROOT=!RABBITMQ_HOME!\ebin -for %%NOEX in (CALL :get_noex !RABBITMQ_ADVANCED_CONFIG_FILE!) 
do RABBITMQ_ADVANCED_CONFIG_FILE_NOEX=%%NOEX +CALL :get_noex !RABBITMQ_ADVANCED_CONFIG_FILE! RABBITMQ_ADVANCED_CONFIG_FILE_NOEX if "!RABBITMQ_ADVANCED_CONFIG_FILE!" == "!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX!.config" ( - RABBITMQ_ADVANCED_CONFIG_FILE=!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX! + set RABBITMQ_ADVANCED_CONFIG_FILE=!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX! ) "!ERLANG_HOME!\bin\erl.exe" ^ @@ -71,7 +71,7 @@ if not exist "!RABBITMQ_SCHEMA_DIR!\rabbitmq.schema" ( set RABBITMQ_EBIN_PATH="-pa !RABBITMQ_EBIN_ROOT!" -for %%NOEX in (CALL :get_noex !RABBITMQ_CONFIG_FILE!) do RABBITMQ_CONFIG_FILE_NOEX=%%NOEX +CALL :get_noex !RABBITMQ_CONFIG_FILE! RABBITMQ_CONFIG_FILE_NOEX if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( if exist "!RABBITMQ_CONFIG_FILE!" ( @@ -183,7 +183,7 @@ if "%~2"=="" ( EXIT /B 0 :get_noex -echo %~dpn1 +set "%~2=%~dpn1" EXIT /B 0 endlocal diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index ed1829c0d621..843336267225 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -126,9 +126,10 @@ set RABBITMQ_EBIN_ROOT=!RABBITMQ_HOME!\ebin set RABBITMQ_CONFIG_FILE="!RABBITMQ_CONFIG_FILE!" -for %%NOEX in (CALL :get_noex !RABBITMQ_ADVANCED_CONFIG_FILE!) do RABBITMQ_ADVANCED_CONFIG_FILE_NOEX=%%NOEX +CALL :get_noex !RABBITMQ_ADVANCED_CONFIG_FILE! RABBITMQ_ADVANCED_CONFIG_FILE_NOEX + if "!RABBITMQ_ADVANCED_CONFIG_FILE!" == "!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX!.config" ( - RABBITMQ_ADVANCED_CONFIG_FILE=!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX! + set RABBITMQ_ADVANCED_CONFIG_FILE=!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX! ) "!ERLANG_HOME!\bin\erl.exe" ^ @@ -161,7 +162,7 @@ if not exist "!RABBITMQ_ADVANCED_CONFIG_FILE!.config" ( echo []. > !RABBITMQ_ADVANCED_CONFIG_FILE!.config ) -for %%NOEX in (CALL :get_noex !RABBITMQ_CONFIG_FILE!) do RABBITMQ_CONFIG_FILE_NOEX=%%NOEX +CALL :get_noex !RABBITMQ_CONFIG_FILE! RABBITMQ_CONFIG_FILE_NOEX if "!RABBITMQ_CONFIG_FILE!" 
== "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( if exist "!RABBITMQ_CONFIG_FILE!" ( @@ -293,7 +294,7 @@ if "%~2"=="" ( EXIT /B 0 :get_noex -echo %~dpn1 +set "%~2=%~dpn1" EXIT /B 0 endlocal From a1636c499cee36c7fd0fe30b0b48eb90628267cd Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Mon, 4 Apr 2016 14:58:05 +0100 Subject: [PATCH 036/174] Remove start arguments log --- scripts/rabbitmq-service.bat | 2 -- 1 file changed, 2 deletions(-) diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index 843336267225..7225c91b3c29 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -253,8 +253,6 @@ set ERLANG_SERVICE_ARGUMENTS= ^ !RABBITMQ_DIST_ARG! ^ !STARVAR! -echo "!ERLANG_SERVICE_ARGUMENTS!" > "!RABBITMQ_CONFIG_FILE!.txt" - set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:\=\\! set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:"=\"! From e472def85b0937cdceba67117f3e8e6ee435d2ca Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Tue, 5 Apr 2016 11:05:17 +0100 Subject: [PATCH 037/174] Remove redundant lines --- scripts/rabbitmq-service.bat | 4 ---- 1 file changed, 4 deletions(-) diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index 7225c91b3c29..8a77fa176917 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -124,8 +124,6 @@ if errorlevel 1 ( set RABBITMQ_EBIN_ROOT=!RABBITMQ_HOME!\ebin -set RABBITMQ_CONFIG_FILE="!RABBITMQ_CONFIG_FILE!" - CALL :get_noex !RABBITMQ_ADVANCED_CONFIG_FILE! RABBITMQ_ADVANCED_CONFIG_FILE_NOEX if "!RABBITMQ_ADVANCED_CONFIG_FILE!" == "!RABBITMQ_ADVANCED_CONFIG_FILE_NOEX!.config" ( @@ -181,8 +179,6 @@ if "!RABBITMQ_CONFIG_FILE!" == "!RABBITMQ_CONFIG_FILE_NOEX!.config" ( ) else ( if exist "!RABBITMQ_CONFIG_FILE!.config" ( set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" - ) else if exist "!RABBITMQ_CONFIG_FILE!.config" ( - set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" 
) else ( rem Always specify generated config arguments, we cannot rem assume .conf file is available From 01dcd2ba79b7799dd1cef0231ab62e50481941a5 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Thu, 7 Apr 2016 12:55:11 +0200 Subject: [PATCH 038/174] Stop a rabbitmq pacemaker resource when monitor fails Related Fuel bug https://bugs.launchpad.net/fuel/+bug/1567355 Signed-off-by: Bogdan Dobrelya --- scripts/rabbitmq-server-ha.ocf | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/rabbitmq-server-ha.ocf b/scripts/rabbitmq-server-ha.ocf index 5ead9a81ad7f..301f7a1fc7ca 100755 --- a/scripts/rabbitmq-server-ha.ocf +++ b/scripts/rabbitmq-server-ha.ocf @@ -1479,6 +1479,7 @@ get_monitor() { if [ -n "$master_name" ]; then ocf_log info "${LH} master exists and rabbit app is not running. Exiting to be restarted by pacemaker" + stop_server_process rc=$OCF_ERR_GENERIC fi fi From 77cf284870049dae7dda3a7566df0a55fad55c12 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Thu, 7 Apr 2016 15:16:52 +0100 Subject: [PATCH 039/174] Reverted version in app.src --- src/rabbit.app.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit.app.src b/src/rabbit.app.src index f40622c32c55..83e7237c806d 100644 --- a/src/rabbit.app.src +++ b/src/rabbit.app.src @@ -1,7 +1,7 @@ {application, rabbit, %% -*- erlang -*- [{description, "RabbitMQ"}, {id, "RabbitMQ"}, - {vsn, "3.7.0"}, + {vsn, "0.0.0"}, {modules, []}, {registered, [rabbit_amqqueue_sup, rabbit_log, From 64540f2de0ead2b37caab184e7ac43a3349c855c Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Thu, 7 Apr 2016 16:47:48 +0100 Subject: [PATCH 040/174] Naming --- src/rabbit_plugins.erl | 47 ++++++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index 6d7c3b8f35da..2d68b1369114 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -24,7 +24,7 @@ -export([validate_plugins/1, 
format_invalid_plugins/1]). % Export for testing purpose. --export([version_support/2, validate_plugins/2]). +-export([is_version_supported/2, validate_plugins/2]). %%---------------------------------------------------------------------------- -ifdef(use_specs). @@ -278,10 +278,10 @@ format_invalid_plugin({Name, Errors}) -> format_invalid_plugin_error({missing_dependency, Dep}) -> io_lib:format(" Dependency is missing or invalid: ~p~n", [Dep]); -format_invalid_plugin_error({version_mismatch, {Version, Required}}) -> +format_invalid_plugin_error({broker_version_mismatch, Version, Required}) -> io_lib:format(" Broker version is invalid." " Current version: ~p Required: ~p~n", [Version, Required]); -format_invalid_plugin_error({{version_mismatch, {Version, Required}}, Name}) -> +format_invalid_plugin_error({{version_mismatch, Version, Required}, Name}) -> io_lib:format(" ~p plugin version is invalid." " Current version: ~p Required: ~p~n", [Name, Version, Required]); @@ -298,17 +298,19 @@ validate_plugins(Plugins) -> validate_plugins(Plugins, RabbitVersion) -> lists:foldl( - fun(#plugin{name = Name, - rabbitmq_versions = RabbitmqVersions, - plugins_versions = PluginsVersions} = Plugin, + fun(#plugin{name = Name, + broker_version_requirements = RabbitmqVersions, + dependency_version_requirements = DepsVersions} = Plugin, {Plugins0, Errors}) -> - case version_support(RabbitVersion, RabbitmqVersions) of - {error, Err} -> {Plugins0, [{Name, [Err]} | Errors]}; - ok -> - case check_plugins_versions(Plugins0, PluginsVersions) of + case is_version_supported(RabbitVersion, RabbitmqVersions) of + true -> + case check_plugins_versions(Plugins0, DepsVersions) of ok -> {[Plugin | Plugins0], Errors}; {error, Err} -> {Plugins0, [{Name, Err} | Errors]} - end + end; + false -> + Error = [{broker_version_mismatch, RabbitVersion, RabbitmqVersions}], + {Plugins0, [{Name, Error} | Errors]} end end, {[],[]}, @@ -322,9 +324,10 @@ check_plugins_versions(AllPlugins, RequiredVersions) -> case 
proplists:get_value(Name, ExistingVersions) of undefined -> [{missing_dependency, Name} | Acc]; Version -> - case version_support(Version, Versions) of - {error, Err} -> [{Err, Name} | Acc]; - ok -> Acc + case is_version_supported(Version, Versions) of + true -> Acc; + false -> + [{{version_mismatch, Version, Versions}, Name} | Acc] end end end, @@ -335,16 +338,16 @@ check_plugins_versions(AllPlugins, RequiredVersions) -> _ -> {error, Problems} end. -version_support(_Version, []) -> ok; -version_support(Version, ExpectedVersions) -> +is_version_supported(_Version, []) -> true; +is_version_supported(Version, ExpectedVersions) -> case lists:any(fun(ExpectedVersion) -> rabbit_misc:version_minor_equivalent(ExpectedVersion, Version) andalso rabbit_misc:version_compare(ExpectedVersion, Version, lte) end, ExpectedVersions) of - true -> ok; - false -> {error, {version_mismatch, {Version, ExpectedVersions}}} + true -> true; + false -> false end. clean_plugins(Plugins) -> @@ -419,12 +422,12 @@ mkplugin(Name, Props, Type, Location) -> Version = proplists:get_value(vsn, Props, "0"), Description = proplists:get_value(description, Props, ""), Dependencies = proplists:get_value(applications, Props, []), - RabbitmqVersions = proplists:get_value(rabbitmq_versions, Props, []), - PluginsVersions = proplists:get_value(plugins_versions, Props, []), + RabbitmqVersions = proplists:get_value(broker_version_requirements, Props, []), + DepsVersions = proplists:get_value(dependency_version_requirements, Props, []), #plugin{name = Name, version = Version, description = Description, dependencies = Dependencies, location = Location, type = Type, - rabbitmq_versions = RabbitmqVersions, - plugins_versions = PluginsVersions}. + broker_version_requirements = RabbitmqVersions, + dependency_version_requirements = DepsVersions}. 
read_app_file(EZ) -> case zip:list_dir(EZ) of From c258acf87e635fd6ffc67fcf6c3e6b4371ecae06 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Thu, 7 Apr 2016 17:14:58 +0100 Subject: [PATCH 041/174] Naming --- src/rabbit_mnesia.erl | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 96c9bffdc898..1db61023126b 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -808,18 +808,19 @@ negotiate_protocol([Node]) -> mnesia_monitor:negotiate_protocol([Node]). with_running_or_clean_mnesia(Fun) -> - MnesiaRunning = case mnesia:system_info(is_running) of + IsMnesiaRunning = case mnesia:system_info(is_running) of + yes -> true; + no -> false; stopping -> ensure_mnesia_not_running(), - no; + false; starting -> ensure_mnesia_running(), - yes; - Other -> Other + true end, - case MnesiaRunning of - yes -> Fun(); - no -> + case IsMnesiaRunning of + true -> Fun(); + false -> {ok, MnesiaDir} = application:get_env(mnesia, dir), application:unset_env(mnesia, dir), mnesia:start(), From 2f58c3a6a51212528dd610b1a422b8deac12bcbc Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Fri, 8 Apr 2016 13:29:24 +0300 Subject: [PATCH 042/174] Naming --- src/rabbit_plugins.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index 2d68b1369114..8c6d231a77f6 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -422,11 +422,11 @@ mkplugin(Name, Props, Type, Location) -> Version = proplists:get_value(vsn, Props, "0"), Description = proplists:get_value(description, Props, ""), Dependencies = proplists:get_value(applications, Props, []), - RabbitmqVersions = proplists:get_value(broker_version_requirements, Props, []), + BrokerVersions = proplists:get_value(broker_version_requirements, Props, []), DepsVersions = proplists:get_value(dependency_version_requirements, Props, []), #plugin{name = Name, version = Version, description 
= Description, dependencies = Dependencies, location = Location, type = Type, - broker_version_requirements = RabbitmqVersions, + broker_version_requirements = BrokerVersions, dependency_version_requirements = DepsVersions}. read_app_file(EZ) -> From 75e64e76bf9a05d1e32befac90068de21423c0a5 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Fri, 8 Apr 2016 14:43:34 +0300 Subject: [PATCH 043/174] Naming, cosmetics --- src/rabbit_plugins.erl | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index 8c6d231a77f6..776bd32eea2d 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -254,7 +254,7 @@ prepare_plugins(Enabled) -> Wanted = dependencies(false, Enabled, AllPlugins), WantedPlugins = lookup_plugins(Wanted, AllPlugins), {ValidPlugins, Problems} = validate_plugins(WantedPlugins), - %TODO: error message formatting + %% TODO: error message formatting rabbit_log:warning(format_invalid_plugins(Problems)), case filelib:ensure_dir(ExpandDir ++ "/") of ok -> ok; @@ -279,11 +279,11 @@ format_invalid_plugin({Name, Errors}) -> format_invalid_plugin_error({missing_dependency, Dep}) -> io_lib:format(" Dependency is missing or invalid: ~p~n", [Dep]); format_invalid_plugin_error({broker_version_mismatch, Version, Required}) -> - io_lib:format(" Broker version is invalid." - " Current version: ~p Required: ~p~n", [Version, Required]); + io_lib:format(" Plugin doesn't support current server version." + " Actual broker version: ~p, supported by the plugin: ~p~n", [Version, Required]); format_invalid_plugin_error({{version_mismatch, Version, Required}, Name}) -> - io_lib:format(" ~p plugin version is invalid." - " Current version: ~p Required: ~p~n", + io_lib:format(" ~p plugin version is unsupported." + " Actual version: ~p, supported: ~p~n", [Name, Version, Required]); format_invalid_plugin_error(Err) -> io_lib:format(" Unknown error ~p~n", [Err]). 
@@ -296,20 +296,20 @@ validate_plugins(Plugins) -> end, validate_plugins(Plugins, RabbitVersion). -validate_plugins(Plugins, RabbitVersion) -> +validate_plugins(Plugins, BrokerVersion) -> lists:foldl( fun(#plugin{name = Name, - broker_version_requirements = RabbitmqVersions, + broker_version_requirements = BrokerVersionReqs, dependency_version_requirements = DepsVersions} = Plugin, {Plugins0, Errors}) -> - case is_version_supported(RabbitVersion, RabbitmqVersions) of + case is_version_supported(BrokerVersion, BrokerVersionReqs) of true -> case check_plugins_versions(Plugins0, DepsVersions) of ok -> {[Plugin | Plugins0], Errors}; {error, Err} -> {Plugins0, [{Name, Err} | Errors]} end; false -> - Error = [{broker_version_mismatch, RabbitVersion, RabbitmqVersions}], + Error = [{broker_version_mismatch, BrokerVersion, BrokerVersionReqs}], {Plugins0, [{Name, Error} | Errors]} end end, From 5ff8cece72045bf7781af173a8a6b44a08fd09c2 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Fri, 8 Apr 2016 15:11:02 +0300 Subject: [PATCH 044/174] Wording --- src/rabbit_plugins.erl | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index 776bd32eea2d..90a2028ef2ba 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -278,13 +278,15 @@ format_invalid_plugin({Name, Errors}) -> format_invalid_plugin_error({missing_dependency, Dep}) -> io_lib:format(" Dependency is missing or invalid: ~p~n", [Dep]); +%% a plugin doesn't support the effective broker version format_invalid_plugin_error({broker_version_mismatch, Version, Required}) -> io_lib:format(" Plugin doesn't support current server version." " Actual broker version: ~p, supported by the plugin: ~p~n", [Version, Required]); +%% one of dependencies of a plugin doesn't match its version requirements format_invalid_plugin_error({{version_mismatch, Version, Required}, Name}) -> - io_lib:format(" ~p plugin version is unsupported." 
- " Actual version: ~p, supported: ~p~n", - [Name, Version, Required]); + io_lib:format(" Version '~p' of dependency '~p' is unsupported." + " Version ranges supported by the plugin: ~p~n", + [Version, Name, Required]); format_invalid_plugin_error(Err) -> io_lib:format(" Unknown error ~p~n", [Err]). From 38ad97582336c12f46fdfabaad1d725398f63120 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Fri, 8 Apr 2016 14:34:41 +0100 Subject: [PATCH 045/174] Versions "" and "0.0.0" always match --- src/rabbit_plugins.erl | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index 90a2028ef2ba..9efefa240324 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -25,7 +25,6 @@ % Export for testing purpose. -export([is_version_supported/2, validate_plugins/2]). - %%---------------------------------------------------------------------------- -ifdef(use_specs). @@ -283,7 +282,7 @@ format_invalid_plugin_error({broker_version_mismatch, Version, Required}) -> io_lib:format(" Plugin doesn't support current server version." " Actual broker version: ~p, supported by the plugin: ~p~n", [Version, Required]); %% one of dependencies of a plugin doesn't match its version requirements -format_invalid_plugin_error({{version_mismatch, Version, Required}, Name}) -> +format_invalid_plugin_error({{dependency_version_mismatch, Version, Required}, Name}) -> io_lib:format(" Version '~p' of dependency '~p' is unsupported." " Version ranges supported by the plugin: ~p~n", [Version, Name, Required]); @@ -329,7 +328,7 @@ check_plugins_versions(AllPlugins, RequiredVersions) -> case is_version_supported(Version, Versions) of true -> Acc; false -> - [{{version_mismatch, Version, Versions}, Name} | Acc] + [{{dependency_version_mismatch, Version, Versions}, Name} | Acc] end end end, @@ -340,6 +339,8 @@ check_plugins_versions(AllPlugins, RequiredVersions) -> _ -> {error, Problems} end. 
+is_version_supported("", _) -> true; +is_version_supported("0.0.0", _) -> true; is_version_supported(_Version, []) -> true; is_version_supported(Version, ExpectedVersions) -> case lists:any(fun(ExpectedVersion) -> From 9f59ed5e1a7a8fc73a6e5e72989b6d79d0f49034 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Fri, 8 Apr 2016 15:00:54 +0100 Subject: [PATCH 046/174] Log development version ignore --- src/rabbit_plugins.erl | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index 9efefa240324..62b5527a2caf 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -305,7 +305,15 @@ validate_plugins(Plugins, BrokerVersion) -> {Plugins0, Errors}) -> case is_version_supported(BrokerVersion, BrokerVersionReqs) of true -> - case check_plugins_versions(Plugins0, DepsVersions) of + case BrokerVersion of + "0.0.0" -> + rabbit_log:warning( + "Using development version of broker." + " Requirement ~p for plugin ~p is ignored.", + [BrokerVersionReqs, Name]); + _ -> ok + end, + case check_plugins_versions(Name, Plugins0, DepsVersions) of ok -> {[Plugin | Plugins0], Errors}; {error, Err} -> {Plugins0, [{Name, Err} | Errors]} end; @@ -317,7 +325,7 @@ validate_plugins(Plugins, BrokerVersion) -> {[],[]}, Plugins). -check_plugins_versions(AllPlugins, RequiredVersions) -> +check_plugins_versions(PluginName, AllPlugins, RequiredVersions) -> ExistingVersions = [{Name, Vsn} || #plugin{name = Name, version = Vsn} <- AllPlugins], Problems = lists:foldl( @@ -326,7 +334,16 @@ check_plugins_versions(AllPlugins, RequiredVersions) -> undefined -> [{missing_dependency, Name} | Acc]; Version -> case is_version_supported(Version, Versions) of - true -> Acc; + true -> + case Version of + "" -> + rabbit_log:warning( + "~p plugin versoin is not defined." 
+ " Requirement ~p for plugin ~p is ignored", + [Versions, PluginName]); + _ -> ok + end, + Acc; false -> [{{dependency_version_mismatch, Version, Versions}, Name} | Acc] end From aeecbec95807127d7c279dd87d15d212e6b29962 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Fri, 8 Apr 2016 22:18:29 +0300 Subject: [PATCH 047/174] Cosmetics --- src/rabbit.app.src | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit.app.src b/src/rabbit.app.src index 83e7237c806d..738a38e2bbf5 100644 --- a/src/rabbit.app.src +++ b/src/rabbit.app.src @@ -1,4 +1,5 @@ -{application, rabbit, %% -*- erlang -*- +%% -*- erlang -*- +{application, rabbit, [{description, "RabbitMQ"}, {id, "RabbitMQ"}, {vsn, "0.0.0"}, From 5cdc5d5e9970c67a2733fc4e9404022fea6a282c Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Fri, 8 Apr 2016 22:18:46 +0300 Subject: [PATCH 048/174] Wording --- src/rabbit_plugins.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index 62b5527a2caf..ea105af2d4a5 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -308,7 +308,7 @@ validate_plugins(Plugins, BrokerVersion) -> case BrokerVersion of "0.0.0" -> rabbit_log:warning( - "Using development version of broker." + "Using development version of the broker." " Requirement ~p for plugin ~p is ignored.", [BrokerVersionReqs, Name]); _ -> ok @@ -338,7 +338,7 @@ check_plugins_versions(PluginName, AllPlugins, RequiredVersions) -> case Version of "" -> rabbit_log:warning( - "~p plugin versoin is not defined." + "~p plugin version is not defined." 
" Requirement ~p for plugin ~p is ignored", [Versions, PluginName]); _ -> ok From de2dbed33f4c73065741eb96f1a3ee178c995adf Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Fri, 8 Apr 2016 22:50:03 +0300 Subject: [PATCH 049/174] Wording --- src/rabbit_plugins.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index ea105af2d4a5..8f7319182edf 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -308,7 +308,7 @@ validate_plugins(Plugins, BrokerVersion) -> case BrokerVersion of "0.0.0" -> rabbit_log:warning( - "Using development version of the broker." + "Running development version of the broker." " Requirement ~p for plugin ~p is ignored.", [BrokerVersionReqs, Name]); _ -> ok From c846ac819dd21074a213fe8c79447f2fda5318b2 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Mon, 11 Apr 2016 16:24:34 +0300 Subject: [PATCH 050/174] Update comment --- src/rabbit_mnesia.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 1db61023126b..2e8051bc3bc7 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -598,7 +598,8 @@ check_cluster_consistency(Node, CheckNodesConsistency) -> {badrpc, _Reason} -> {error, not_found}; {_OTP, Rabbit, Hash, _Status} when is_binary(Hash) -> - %% delegate hash checking implies version mismatch + %% when a delegate module .beam file hash is present + %% in the tuple, we are dealing with an old version rabbit_version:version_error("Rabbit", rabbit_misc:version(), Rabbit); {_OTP, _Rabbit, _Protocol, {error, _}} -> {error, not_found}; From 7d55ad5ac0f5c5700d6f24daf8da0df08e6cc899 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Mon, 11 Apr 2016 17:36:14 +0300 Subject: [PATCH 051/174] Naming --- src/rabbit_mnesia.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 2e8051bc3bc7..92424481d70e 100644 --- 
a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -597,7 +597,7 @@ check_cluster_consistency(Node, CheckNodesConsistency) -> case rpc:call(Node, rabbit_mnesia, node_info, []) of {badrpc, _Reason} -> {error, not_found}; - {_OTP, Rabbit, Hash, _Status} when is_binary(Hash) -> + {_OTP, Rabbit, DelegateModuleHash, _Status} when is_binary(DelegateModuleHash) -> %% when a delegate module .beam file hash is present %% in the tuple, we are dealing with an old version rabbit_version:version_error("Rabbit", rabbit_misc:version(), Rabbit); From 21c57385ade0f43cd3f47488e2985cda758e79db Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Mon, 11 Apr 2016 18:53:36 +0100 Subject: [PATCH 052/174] Compatibility with 3.6.x --- src/rabbit_mnesia.erl | 41 +++++++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 92424481d70e..31f8e6a16cde 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -423,7 +423,6 @@ cluster_status(WhichNodes) -> node_info() -> {rabbit_misc:otp_release(), rabbit_misc:version(), - mnesia:system_info(protocol_version), cluster_status_from_mnesia()}. 
node_type() -> @@ -601,15 +600,15 @@ check_cluster_consistency(Node, CheckNodesConsistency) -> %% when a delegate module .beam file hash is present %% in the tuple, we are dealing with an old version rabbit_version:version_error("Rabbit", rabbit_misc:version(), Rabbit); - {_OTP, _Rabbit, _Protocol, {error, _}} -> + {_OTP, _Rabbit, {error, _}} -> {error, not_found}; - {_OTP, Rabbit, Protocol, {ok, Status}} when CheckNodesConsistency -> - case check_consistency(Node, Rabbit, Protocol, Status) of + {_OTP, Rabbit, {ok, Status}} when CheckNodesConsistency -> + case check_consistency(Node, Rabbit, Status) of {error, _} = E -> E; {ok, Res} -> {ok, Res} end; - {_OTP, Rabbit, Protocol, {ok, Status}} -> - case check_consistency(Node, Rabbit, Protocol) of + {_OTP, Rabbit, {ok, Status}} -> + case check_consistency(Node, Rabbit) of {error, _} = E -> E; ok -> {ok, Status} end @@ -765,14 +764,14 @@ change_extra_db_nodes(ClusterNodes0, CheckOtherNodes) -> Nodes end. -check_consistency(Node, Rabbit, ProtocolVersion) -> +check_consistency(Node, Rabbit) -> rabbit_misc:sequence_error( - [check_mnesia_consistency(Node, ProtocolVersion), + [check_mnesia_consistency(Node), check_rabbit_consistency(Rabbit)]). -check_consistency(Node, Rabbit, ProtocolVersion, Status) -> +check_consistency(Node, Rabbit, Status) -> rabbit_misc:sequence_error( - [check_mnesia_consistency(Node, ProtocolVersion), + [check_mnesia_consistency(Node), check_rabbit_consistency(Rabbit), check_nodes_consistency(Node, Status)]). @@ -787,7 +786,7 @@ check_nodes_consistency(Node, RemoteStatus = {RemoteAllNodes, _, _}) -> [node(), Node, Node])}} end. 
-check_mnesia_consistency(Node, ProtocolVersion) -> +check_mnesia_consistency(Node) -> % If mnesia is running we will just check protocol version % If it's not running, we don't want it to join cluster until all checks pass % so we start it without `dir` env variable to prevent @@ -796,15 +795,25 @@ check_mnesia_consistency(Node, ProtocolVersion) -> case negotiate_protocol([Node]) of [Node] -> ok; [] -> - LocalVersion = mnesia:system_info(protocol_version), + LocalVersion = protocol_version(), + RemoteVersion = protocol_version(Node), {error, {inconsistent_cluster, rabbit_misc:format("Mnesia protocol negotiation failed." " Local version: ~p." " Remote version ~p", - [LocalVersion, ProtocolVersion])}} + [LocalVersion, RemoteVersion])}} end end). +protocol_version() -> + mnesia:system_info(protocol_version). + +protocol_version(Node) when is_atom(Node) -> + case rpc:call(Node, mnesia, system_info, [protocol_version]) of + {badrpc, _} = Err -> {unknown, Err}; + Val -> Val + end. + negotiate_protocol([Node]) -> mnesia_monitor:negotiate_protocol([Node]). 
@@ -871,10 +880,10 @@ find_auto_cluster_node([Node | Nodes]) -> %% old delegate hash check {_OTP, RMQ, Hash, _} when is_binary(Hash) -> Fail("version ~s~n", [RMQ]); - {_OTP, _RMQ, _Protocol, {error, _} = E} -> + {_OTP, _RMQ, {error, _} = E} -> Fail("~p~n", [E]); - {OTP, RMQ, Protocol, _} -> - case check_consistency(Node, RMQ, Protocol) of + {OTP, RMQ, _} -> + case check_consistency(Node, RMQ) of {error, _} -> Fail("versions ~p~n", [{OTP, RMQ}]); ok -> {ok, Node} From 066d7e63198db640df479f6990856456a3a2eccc Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Tue, 12 Apr 2016 11:21:29 +0100 Subject: [PATCH 053/174] Check mnesia consistency starting from 3.6.2, OTP consistency in older versions --- src/rabbit_mnesia.erl | 58 +++++++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 27 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 31f8e6a16cde..95ea74b07d78 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -423,6 +423,7 @@ cluster_status(WhichNodes) -> node_info() -> {rabbit_misc:otp_release(), rabbit_misc:version(), + mnesia:system_info(protocol_version), cluster_status_from_mnesia()}. node_type() -> @@ -593,27 +594,35 @@ check_cluster_consistency() -> end. 
check_cluster_consistency(Node, CheckNodesConsistency) -> - case rpc:call(Node, rabbit_mnesia, node_info, []) of + case remote_node_info(Node) of {badrpc, _Reason} -> {error, not_found}; {_OTP, Rabbit, DelegateModuleHash, _Status} when is_binary(DelegateModuleHash) -> %% when a delegate module .beam file hash is present %% in the tuple, we are dealing with an old version rabbit_version:version_error("Rabbit", rabbit_misc:version(), Rabbit); - {_OTP, _Rabbit, {error, _}} -> + {_OTP, _Rabbit, _Protocol, {error, _}} -> {error, not_found}; - {_OTP, Rabbit, {ok, Status}} when CheckNodesConsistency -> - case check_consistency(Node, Rabbit, Status) of + {OTP, Rabbit, Protocol, {ok, Status}} when CheckNodesConsistency -> + case check_consistency(Node, OTP, Rabbit, Protocol, Status) of {error, _} = E -> E; {ok, Res} -> {ok, Res} end; - {_OTP, Rabbit, {ok, Status}} -> - case check_consistency(Node, Rabbit) of + {OTP, Rabbit, Protocol, {ok, Status}} -> + case check_consistency(Node, OTP, Rabbit, Protocol) of {error, _} = E -> E; ok -> {ok, Status} end end. +remote_node_info(Node) -> + case rpc:call(Node, rabbit_mnesia, node_info, []) of + {badrpc, _} = Error -> Error; + {OTP, Rabbit, Status} -> {OTP, Rabbit, unsupported, Status}; + {OTP, Rabbit, Protocol, Status} -> {OTP, Rabbit, Protocol, Status} + end. + + %%-------------------------------------------------------------------- %% Hooks for `rabbit_node_monitor' %%-------------------------------------------------------------------- @@ -764,14 +773,14 @@ change_extra_db_nodes(ClusterNodes0, CheckOtherNodes) -> Nodes end. -check_consistency(Node, Rabbit) -> +check_consistency(Node, OTP, Rabbit, ProtocolVersion) -> rabbit_misc:sequence_error( - [check_mnesia_consistency(Node), + [check_mnesia_or_otp_consistency(Node, ProtocolVersion, OTP), check_rabbit_consistency(Rabbit)]). 
-check_consistency(Node, Rabbit, Status) -> +check_consistency(Node, OTP, Rabbit, ProtocolVersion, Status) -> rabbit_misc:sequence_error( - [check_mnesia_consistency(Node), + [check_mnesia_or_otp_consistency(Node, ProtocolVersion, OTP), check_rabbit_consistency(Rabbit), check_nodes_consistency(Node, Status)]). @@ -786,7 +795,12 @@ check_nodes_consistency(Node, RemoteStatus = {RemoteAllNodes, _, _}) -> [node(), Node, Node])}} end. -check_mnesia_consistency(Node) -> +check_mnesia_or_otp_consistency(_Node, unsupported, OTP) -> + rabbit_version:check_otp_consistency(OTP); +check_mnesia_or_otp_consistency(Node, ProtocolVersion, _) -> + check_mnesia_consistency(Node, ProtocolVersion). + +check_mnesia_consistency(Node, ProtocolVersion) -> % If mnesia is running we will just check protocol version % If it's not running, we don't want it to join cluster until all checks pass % so we start it without `dir` env variable to prevent @@ -795,25 +809,15 @@ check_mnesia_consistency(Node) -> case negotiate_protocol([Node]) of [Node] -> ok; [] -> - LocalVersion = protocol_version(), - RemoteVersion = protocol_version(Node), + LocalVersion = mnesia:system_info(protocol_version), {error, {inconsistent_cluster, rabbit_misc:format("Mnesia protocol negotiation failed." " Local version: ~p." " Remote version ~p", - [LocalVersion, RemoteVersion])}} + [LocalVersion, ProtocolVersion])}} end end). -protocol_version() -> - mnesia:system_info(protocol_version). - -protocol_version(Node) when is_atom(Node) -> - case rpc:call(Node, mnesia, system_info, [protocol_version]) of - {badrpc, _} = Err -> {unknown, Err}; - Val -> Val - end. - negotiate_protocol([Node]) -> mnesia_monitor:negotiate_protocol([Node]). 
@@ -874,16 +878,16 @@ find_auto_cluster_node([Node | Nodes]) -> "Could not auto-cluster with ~s: " ++ Fmt, [Node | Args]), find_auto_cluster_node(Nodes) end, - case rpc:call(Node, rabbit_mnesia, node_info, []) of + case remote_node_info(Node) of {badrpc, _} = Reason -> Fail("~p~n", [Reason]); %% old delegate hash check {_OTP, RMQ, Hash, _} when is_binary(Hash) -> Fail("version ~s~n", [RMQ]); - {_OTP, _RMQ, {error, _} = E} -> + {_OTP, _RMQ, _Protocol, {error, _} = E} -> Fail("~p~n", [E]); - {OTP, RMQ, _} -> - case check_consistency(Node, RMQ) of + {OTP, RMQ, Protocol, _} -> + case check_consistency(Node, OTP, RMQ, Protocol) of {error, _} -> Fail("versions ~p~n", [{OTP, RMQ}]); ok -> {ok, Node} From e5724d797f18aec8ab86b9a4054e1183a5bc063f Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Wed, 13 Apr 2016 11:31:38 +0300 Subject: [PATCH 054/174] Explain --- src/rabbit_mnesia.erl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 95ea74b07d78..6a57f6bb2cd4 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -618,7 +618,9 @@ check_cluster_consistency(Node, CheckNodesConsistency) -> remote_node_info(Node) -> case rpc:call(Node, rabbit_mnesia, node_info, []) of {badrpc, _} = Error -> Error; + %% RabbitMQ prior to 3.6.2 {OTP, Rabbit, Status} -> {OTP, Rabbit, unsupported, Status}; + %% RabbitMQ 3.6.2 or later {OTP, Rabbit, Protocol, Status} -> {OTP, Rabbit, Protocol, Status} end. From f9c558d49d60362d9cbbf9284c458a6c83f16f85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Wed, 13 Apr 2016 12:30:25 +0200 Subject: [PATCH 055/174] rabbitmqctl set_policy: Format error messages References #742. 
[#117521029] --- src/rabbit_control_main.erl | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/rabbit_control_main.erl b/src/rabbit_control_main.erl index d4d61662768b..f63694b65743 100644 --- a/src/rabbit_control_main.erl +++ b/src/rabbit_control_main.erl @@ -508,9 +508,15 @@ action(set_policy, Node, [Key, Pattern, Defn], Opts, Inform) -> PriorityArg = proplists:get_value(?PRIORITY_OPT, Opts), ApplyToArg = list_to_binary(proplists:get_value(?APPLY_TO_OPT, Opts)), Inform(Msg, [Key, Pattern, Defn, PriorityArg]), - rpc_call( + Res = rpc_call( Node, rabbit_policy, parse_set, - [VHostArg, list_to_binary(Key), Pattern, Defn, PriorityArg, ApplyToArg]); + [VHostArg, list_to_binary(Key), Pattern, Defn, PriorityArg, ApplyToArg]), + case Res of + {error, Format, Args} when is_list(Format) andalso is_list(Args) -> + {error_string, rabbit_misc:format(Format, Args)}; + _ -> + Res + end; action(clear_policy, Node, [Key], Opts, Inform) -> VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), From b149da724210dba4021dbe49936ff7cabc28c3c6 Mon Sep 17 00:00:00 2001 From: Alexey Lebedeff Date: Wed, 13 Apr 2016 18:37:29 +0300 Subject: [PATCH 056/174] Detect missing 'user' process 'user' process can be unrecoverably crashed with good timing. As a result some rabbitmqctl commands will stop working, e.g. add_user: rabbitmqctl -n rabbit@localhost add_user ley ley Creating user "ley" ... Error: {badarg, [{erlang,group_leader,[undefined,<5428.28745.44>],[]}, {rabbit_log,with_local_io,1, [{file,"src/rabbit_log.erl"},{line,99}]}, {rabbit_auth_backend_internal,add_user,2, [{file,"src/rabbit_auth_backend_internal.erl"},{line,149}]}, {rpc,'-handle_call_call/6-fun-0-',5, [{file,"rpc.erl"},{line,206}]}]} Exact sequence events that will crash 'user' is the following: - Move startup_log file to a separate partition - Start server - Fill partition to the fullest - Do stop_app/start_app several times, so all the remaning slack will be used by log records. 
- At some point 'user' will crash. - Free some space on partition with startup_log - Observe that any rabbit action that requires logging is now broken --- src/rabbit_log.erl | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index c6081fad0d0b..e3e1d7042f07 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -96,10 +96,18 @@ with_local_io(Fun) -> Node = node(), case node(GL) of Node -> Fun(); - _ -> group_leader(whereis(user), self()), + _ -> set_group_leader_to_user(), try Fun() after group_leader(GL, self()) end end. + +set_group_leader_to_user() -> + case whereis(user) of + undefined -> + warning("'user' IO process has died, you'd better restart erlang VM"); + User -> + group_leader(User, self()) + end. From e77ff6980fed41fb30fc4b03575527f8864e2008 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Wed, 13 Apr 2016 20:39:47 +0300 Subject: [PATCH 057/174] Wording --- src/rabbit_log.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index e3e1d7042f07..ed73a293ca33 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -107,7 +107,7 @@ with_local_io(Fun) -> set_group_leader_to_user() -> case whereis(user) of undefined -> - warning("'user' IO process has died, you'd better restart erlang VM"); + warning("the 'user' I/O process has terminated, some features will fail until Erlang VM is restarted"); User -> group_leader(User, self()) end. 
From 059271001a2b8fc41ff4958e9ff9f958307ef340 Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Thu, 7 Apr 2016 14:34:30 +0100 Subject: [PATCH 058/174] Stop deleted GM member when partial partition is detected * Avoids deadlock on syncing queues and inconsistent state across the cluster * Queue must restart and rejoin the group --- src/gm.erl | 351 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 221 insertions(+), 130 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index aeb050e15fea..199cf7c4de16 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -617,14 +617,20 @@ handle_call({add_on_right, NewMember}, _From, group_name = GroupName, members_state = MembersState, txn_executor = TxnFun }) -> - Group = record_new_member_in_group(NewMember, Self, GroupName, TxnFun), - View1 = group_to_view(Group), - MembersState1 = remove_erased_members(MembersState, View1), - ok = send_right(NewMember, View1, - {catchup, Self, prepare_members_state(MembersState1)}), - {Result, State1} = change_view(View1, State #state { - members_state = MembersState1 }), - handle_callback_result({Result, {ok, Group}, State1}). + try + Group = record_new_member_in_group( + NewMember, Self, GroupName, TxnFun), + View1 = group_to_view(check_membership(Self, Group)), + MembersState1 = remove_erased_members(MembersState, View1), + ok = send_right(NewMember, View1, + {catchup, Self, prepare_members_state(MembersState1)}), + {Result, State1} = change_view(View1, State #state { + members_state = MembersState1 }), + handle_callback_result({Result, {ok, Group}, State1}) + catch + lost_membership -> + {stop, normal, State} + end. %% add_on_right causes a catchup to be sent immediately from the left, %% so we can never see this from the left neighbour. 
However, it's @@ -638,19 +644,28 @@ handle_cast({?TAG, _ReqVer, check_neighbours}, handle_cast({?TAG, ReqVer, Msg}, State = #state { view = View, + self = Self, members_state = MembersState, group_name = GroupName }) -> - {Result, State1} = - case needs_view_update(ReqVer, View) of - true -> View1 = group_to_view(dirty_read_group(GroupName)), - MemberState1 = remove_erased_members(MembersState, View1), - change_view(View1, State #state { - members_state = MemberState1 }); - false -> {ok, State} - end, - handle_callback_result( - if_callback_success( - Result, fun handle_msg_true/3, fun handle_msg_false/3, Msg, State1)); + try + {Result, State1} = + case needs_view_update(ReqVer, View) of + true -> + View1 = group_to_view( + check_membership(Self, + dirty_read_group(GroupName))), + MemberState1 = remove_erased_members(MembersState, View1), + change_view(View1, State #state { + members_state = MemberState1 }); + false -> {ok, State} + end, + handle_callback_result( + if_callback_success( + Result, fun handle_msg_true/3, fun handle_msg_false/3, Msg, State1)) + catch + lost_membership -> + {stop, normal, State} + end; handle_cast({broadcast, _Msg, _SizeHint}, State = #state { shutting_down = {true, _} }) -> @@ -724,39 +739,44 @@ handle_info({'DOWN', MRef, process, _Pid, Reason}, group_name = GroupName, confirms = Confirms, txn_executor = TxnFun }) -> - Member = case {Left, Right} of - {{Member1, MRef}, _} -> Member1; - {_, {Member1, MRef}} -> Member1; - _ -> undefined - end, - case {Member, Reason} of - {undefined, _} -> - noreply(State); - {_, {shutdown, ring_shutdown}} -> - noreply(State); - _ -> - %% In the event of a partial partition we could see another member - %% go down and then remove them from Mnesia. While they can - %% recover from this they'd have to restart the queue - not - %% ideal. 
So let's sleep here briefly just in case this was caused - %% by a partial partition; in which case by the time we record the - %% member death in Mnesia we will probably be in a full - %% partition and will not be assassinating another member. - timer:sleep(100), - View1 = group_to_view(record_dead_member_in_group( - Member, GroupName, TxnFun)), - handle_callback_result( - case alive_view_members(View1) of - [Self] -> maybe_erase_aliases( - State #state { - members_state = blank_member_state(), - confirms = purge_confirms(Confirms) }, - View1); - _ -> change_view(View1, State) - end) + try + check_membership(GroupName), + Member = case {Left, Right} of + {{Member1, MRef}, _} -> Member1; + {_, {Member1, MRef}} -> Member1; + _ -> undefined + end, + case {Member, Reason} of + {undefined, _} -> + noreply(State); + {_, {shutdown, ring_shutdown}} -> + noreply(State); + _ -> + %% In the event of a partial partition we could see another member + %% go down and then remove them from Mnesia. While they can + %% recover from this they'd have to restart the queue - not + %% ideal. So let's sleep here briefly just in case this was caused + %% by a partial partition; in which case by the time we record the + %% member death in Mnesia we will probably be in a full + %% partition and will not be assassinating another member. + timer:sleep(100), + View1 = group_to_view(record_dead_member_in_group(Self, + Member, GroupName, TxnFun, true)), + handle_callback_result( + case alive_view_members(View1) of + [Self] -> maybe_erase_aliases( + State #state { + members_state = blank_member_state(), + confirms = purge_confirms(Confirms) }, + View1); + _ -> change_view(View1, State) + end) + end + catch + lost_membership -> + {stop, normal, State} end. - terminate(Reason, #state { module = Module, callback_args = Args }) -> Module:handle_terminate(Args, Reason). 
@@ -841,52 +861,30 @@ handle_msg({catchup, _NotLeft, _MembersState}, State) -> handle_msg({activity, Left, Activity}, State = #state { self = Self, + group_name = GroupName, left = {Left, _MRefL}, view = View, members_state = MembersState, confirms = Confirms }) when MembersState =/= undefined -> - {MembersState1, {Confirms1, Activity1}} = - lists:foldl( - fun ({Id, Pubs, Acks}, MembersStateConfirmsActivity) -> - with_member_acc( - fun (Member = #member { pending_ack = PA, - last_pub = LP, - last_ack = LA }, - {Confirms2, Activity2}) -> - case is_member_alias(Id, Self, View) of - true -> - {ToAck, PA1} = - find_common(queue_from_pubs(Pubs), PA, - queue:new()), - LA1 = last_ack(Acks, LA), - AckNums = acks_from_queue(ToAck), - Confirms3 = maybe_confirm( - Self, Id, Confirms2, AckNums), - {Member #member { pending_ack = PA1, - last_ack = LA1 }, - {Confirms3, - activity_cons( - Id, [], AckNums, Activity2)}}; - false -> - PA1 = apply_acks(Acks, join_pubs(PA, Pubs)), - LA1 = last_ack(Acks, LA), - LP1 = last_pub(Pubs, LP), - {Member #member { pending_ack = PA1, - last_pub = LP1, - last_ack = LA1 }, - {Confirms2, - activity_cons(Id, Pubs, Acks, Activity2)}} - end - end, Id, MembersStateConfirmsActivity) - end, {MembersState, {Confirms, activity_nil()}}, Activity), - State1 = State #state { members_state = MembersState1, - confirms = Confirms1 }, - Activity3 = activity_finalise(Activity1), - ok = maybe_send_activity(Activity3, State1), - {Result, State2} = maybe_erase_aliases(State1, View), - if_callback_success( - Result, fun activity_true/3, fun activity_false/3, Activity3, State2); + try + %% If we have to stop, do it asap so we avoid any ack confirmation + %% Membership must be checked again by erase_members_in_group, as the + %% node can be marked as dead on the meanwhile + check_membership(GroupName), + {MembersState1, {Confirms1, Activity1}} = + calculate_activity(MembersState, Confirms, Activity, Self, View), + State1 = State #state { members_state = MembersState1, 
+ confirms = Confirms1 }, + Activity3 = activity_finalise(Activity1), + ok = maybe_send_activity(Activity3, State1), + {Result, State2} = maybe_erase_aliases(State1, View), + if_callback_success( + Result, fun activity_true/3, fun activity_false/3, Activity3, State2) + catch + lost_membership -> + {{stop, normal}, State} + end; handle_msg({activity, _NotLeft, _Activity}, State) -> {ok, State}. @@ -1091,8 +1089,8 @@ join_group(Self, GroupName, #gm_group { members = Members } = Group, TxnFun) -> fun () -> join_group( Self, GroupName, - record_dead_member_in_group( - Left, GroupName, TxnFun), + record_dead_member_in_group(Self, + Left, GroupName, TxnFun, false), TxnFun) end, try @@ -1142,47 +1140,84 @@ prune_or_create_group(Self, GroupName, TxnFun) -> end end). -record_dead_member_in_group(Member, GroupName, TxnFun) -> - TxnFun( - fun () -> - Group = #gm_group { members = Members, version = Ver } = - read_group(GroupName), - case lists:splitwith( - fun (Member1) -> Member1 =/= Member end, Members) of - {_Members1, []} -> %% not found - already recorded dead - Group; - {Members1, [Member | Members2]} -> - Members3 = Members1 ++ [{dead, Member} | Members2], - write_group(Group #gm_group { members = Members3, - version = Ver + 1 }) - end - end). 
+record_dead_member_in_group(Self, Member, GroupName, TxnFun, Verify) -> + Fun = + fun () -> + try + Group = #gm_group { members = Members, version = Ver } = + case Verify of + true -> + check_membership(Self, read_group(GroupName)); + false -> + read_group(GroupName) + end, + case lists:splitwith( + fun (Member1) -> Member1 =/= Member end, Members) of + {_Members1, []} -> %% not found - already recorded dead + Group; + {Members1, [Member | Members2]} -> + Members3 = Members1 ++ [{dead, Member} | Members2], + write_group(Group #gm_group { members = Members3, + version = Ver + 1 }) + end + catch + lost_membership -> + %% The transaction must not be abruptly crashed, but + %% leave the gen_server to stop normally + {error, lost_membership} + end + end, + handle_lost_membership_in_txn(TxnFun, Fun). + +handle_lost_membership_in_txn(TxnFun, Fun) -> + case TxnFun(Fun) of + {error, lost_membership = T} -> + throw(T); + Any -> + Any + end. record_new_member_in_group(NewMember, Left, GroupName, TxnFun) -> - TxnFun( - fun () -> - Group = #gm_group { members = Members, version = Ver } = - read_group(GroupName), - {Prefix, [Left | Suffix]} = - lists:splitwith(fun (M) -> M =/= Left end, Members), - write_group(Group #gm_group { - members = Prefix ++ [Left, NewMember | Suffix], - version = Ver + 1 }) - end). + Fun = + fun () -> + try + Group = #gm_group { members = Members, version = Ver } = + check_membership(Left, read_group(GroupName)), + {Prefix, [Left | Suffix]} = + lists:splitwith(fun (M) -> M =/= Left end, Members), + write_group(Group #gm_group { + members = Prefix ++ [Left, NewMember | Suffix], + version = Ver + 1 }) + catch + lost_membership -> + %% The transaction must not be abruptly crashed, but + %% leave the gen_server to stop normally + {error, lost_membership} + end + end, + handle_lost_membership_in_txn(TxnFun, Fun). 
-erase_members_in_group(Members, GroupName, TxnFun) -> +erase_members_in_group(Self, Members, GroupName, TxnFun) -> DeadMembers = [{dead, Id} || Id <- Members], - TxnFun( - fun () -> - Group = #gm_group { members = [_|_] = Members1, version = Ver } = - read_group(GroupName), - case Members1 -- DeadMembers of - Members1 -> Group; - Members2 -> write_group( - Group #gm_group { members = Members2, - version = Ver + 1 }) + Fun = + fun () -> + try + Group = #gm_group { members = [_|_] = Members1, version = Ver } = + check_membership(Self, read_group(GroupName)), + case Members1 -- DeadMembers of + Members1 -> Group; + Members2 -> write_group( + Group #gm_group { members = Members2, + version = Ver + 1 }) + end + catch + lost_membership -> + %% The transaction must not be abruptly crashed, but + %% leave the gen_server to stop normally + {error, lost_membership} end - end). + end, + handle_lost_membership_in_txn(TxnFun, Fun). maybe_erase_aliases(State = #state { self = Self, group_name = GroupName, @@ -1203,7 +1238,7 @@ maybe_erase_aliases(State = #state { self = Self, View1 = case Erasable of [] -> View; _ -> group_to_view( - erase_members_in_group(Erasable, GroupName, TxnFun)) + erase_members_in_group(Self, Erasable, GroupName, TxnFun)) end, change_view(View1, State #state { members_state = MembersState1 }). @@ -1378,6 +1413,41 @@ maybe_send_activity(Activity, #state { self = Self, send_right(Right, View, Msg) -> ok = neighbour_cast(Right, {?TAG, view_version(View), Msg}). 
+calculate_activity(MembersState, Confirms, Activity, Self, View) -> + lists:foldl( + fun ({Id, Pubs, Acks}, MembersStateConfirmsActivity) -> + with_member_acc( + fun (Member = #member { pending_ack = PA, + last_pub = LP, + last_ack = LA }, + {Confirms2, Activity2}) -> + case is_member_alias(Id, Self, View) of + true -> + {ToAck, PA1} = + find_common(queue_from_pubs(Pubs), PA, + queue:new()), + LA1 = last_ack(Acks, LA), + AckNums = acks_from_queue(ToAck), + Confirms3 = maybe_confirm( + Self, Id, Confirms2, AckNums), + {Member #member { pending_ack = PA1, + last_ack = LA1 }, + {Confirms3, + activity_cons( + Id, [], AckNums, Activity2)}}; + false -> + PA1 = apply_acks(Acks, join_pubs(PA, Pubs)), + LA1 = last_ack(Acks, LA), + LP1 = last_pub(Pubs, LP), + {Member #member { pending_ack = PA1, + last_pub = LP1, + last_ack = LA1 }, + {Confirms2, + activity_cons(Id, Pubs, Acks, Activity2)}} + end + end, Id, MembersStateConfirmsActivity) + end, {MembersState, {Confirms, activity_nil()}}, Activity). + callback(Args, Module, Activity) -> Result = lists:foldl( @@ -1530,3 +1600,24 @@ call(Pid, Msg, Timeout) -> gen_server2:call(Pid, Msg, Timeout). cast(Pid, Msg) -> gen_server2:cast(Pid, Msg). monitor(Pid) -> erlang:monitor(process, Pid). demonitor(MRef) -> erlang:demonitor(MRef). + +check_membership(Self, #gm_group{members = M} = Group) -> + case lists:member(Self, M) of + true -> + Group; + false -> + throw(lost_membership) + end. + +check_membership(GroupName) -> + case dirty_read_group(GroupName) of + #gm_group{members = M} -> + case lists:keymember(self(), 2, M) of + true -> + ok; + false -> + throw(lost_membership) + end; + {error, not_found} -> + throw(lost_membership) + end. 
From 5f675aaaf2c9161c26c406d834f82fb070e2f12e Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Thu, 14 Apr 2016 18:14:39 +0100 Subject: [PATCH 059/174] SCHEMA_DIR set to writable location by default --- scripts/rabbitmq-defaults | 2 +- scripts/rabbitmq-defaults.bat | 2 +- scripts/rabbitmq-server | 4 ++++ scripts/rabbitmq-server.bat | 4 ++++ scripts/rabbitmq-service.bat | 4 ++++ 5 files changed, 14 insertions(+), 2 deletions(-) diff --git a/scripts/rabbitmq-defaults b/scripts/rabbitmq-defaults index 1505ce0f00b9..494f20af2b73 100755 --- a/scripts/rabbitmq-defaults +++ b/scripts/rabbitmq-defaults @@ -40,7 +40,7 @@ MNESIA_BASE=${SYS_PREFIX}/var/lib/rabbitmq/mnesia ENABLED_PLUGINS_FILE=${SYS_PREFIX}/etc/rabbitmq/enabled_plugins GENERATED_CONFIG_DIR=${SYS_PREFIX}/var/lib/rabbitmq/config ADVANCED_CONFIG_FILE=${SYS_PREFIX}/etc/rabbitmq/advanced -SCHEMA_DIR=${RABBITMQ_HOME}/priv/schema +SCHEMA_DIR=${SYS_PREFIX}/etc/rabbitmq/schema PLUGINS_DIR="${RABBITMQ_HOME}/plugins" diff --git a/scripts/rabbitmq-defaults.bat b/scripts/rabbitmq-defaults.bat index 6919ca6a4eca..0246dc64fda7 100644 --- a/scripts/rabbitmq-defaults.bat +++ b/scripts/rabbitmq-defaults.bat @@ -43,7 +43,7 @@ set MNESIA_BASE=!RABBITMQ_BASE!\db set ENABLED_PLUGINS_FILE=!RABBITMQ_BASE!\enabled_plugins set GENERATED_CONFIG_DIR=!RABBITMQ_BASE!\config set ADVANCED_CONFIG_FILE=!RABBITMQ_BASE!\advanced -set SCHEMA_DIR=!RABBITMQ_HOME!\priv\schema +set SCHEMA_DIR=!RABBITMQ_BASE!\schema REM PLUGINS_DIR="${RABBITMQ_HOME}/plugins" for /f "delims=" %%F in ("!TDP0!..\plugins") do set PLUGINS_DIR=%%~dpsF%%~nF%%~xF diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server index 507a3ebfd9e0..8a6279dc6565 100755 --- a/scripts/rabbitmq-server +++ b/scripts/rabbitmq-server @@ -94,6 +94,10 @@ else exit ${PRELAUNCH_RESULT} fi +if [ ! -d ${RABBITMQ_SCHEMA_DIR} ]; then + mkdir "${RABBITMQ_SCHEMA_DIR}" +fi + if [ ! 
-f "${RABBITMQ_SCHEMA_DIR}/rabbitmq.schema" ]; then cp "${RABBITMQ_HOME}/priv/schema/rabbitmq.schema" "${RABBITMQ_SCHEMA_DIR}" fi diff --git a/scripts/rabbitmq-server.bat b/scripts/rabbitmq-server.bat index d87dc9d5fbde..20a0bd882312 100644 --- a/scripts/rabbitmq-server.bat +++ b/scripts/rabbitmq-server.bat @@ -65,6 +65,10 @@ if ERRORLEVEL 2 ( set RABBITMQ_DIST_ARG=-kernel inet_dist_listen_min !RABBITMQ_DIST_PORT! -kernel inet_dist_listen_max !RABBITMQ_DIST_PORT! ) +if not exist "!RABBITMQ_SCHEMA_DIR!" ( + mkdir "!RABBITMQ_SCHEMA_DIR!" +) + if not exist "!RABBITMQ_SCHEMA_DIR!\rabbitmq.schema" ( copy "!RABBITMQ_HOME!\priv\schema\rabbitmq.schema" "!RABBITMQ_SCHEMA_DIR!\rabbitmq.schema" ) diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index 8a77fa176917..2fb34ddb2881 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -151,6 +151,10 @@ if ERRORLEVEL 3 ( set RABBITMQ_DIST_ARG=-kernel inet_dist_listen_min !RABBITMQ_DIST_PORT! -kernel inet_dist_listen_max !RABBITMQ_DIST_PORT! ) +if not exist "!RABBITMQ_SCHEMA_DIR!" ( + mkdir "!RABBITMQ_SCHEMA_DIR!" 
+) + if not exist "!RABBITMQ_SCHEMA_DIR!\rabbitmq.schema" ( copy "!RABBITMQ_HOME!\priv\schema\rabbitmq.schema" "!RABBITMQ_SCHEMA_DIR!\rabbitmq.schema" ) From f9a47d7db5103feae577de574df481ad61a791c1 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Fri, 15 Apr 2016 14:07:40 +0100 Subject: [PATCH 060/174] Changed default schema dir to /var/lib/rabbitmq/schema --- scripts/rabbitmq-defaults | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/rabbitmq-defaults b/scripts/rabbitmq-defaults index 494f20af2b73..53429786949b 100755 --- a/scripts/rabbitmq-defaults +++ b/scripts/rabbitmq-defaults @@ -40,7 +40,7 @@ MNESIA_BASE=${SYS_PREFIX}/var/lib/rabbitmq/mnesia ENABLED_PLUGINS_FILE=${SYS_PREFIX}/etc/rabbitmq/enabled_plugins GENERATED_CONFIG_DIR=${SYS_PREFIX}/var/lib/rabbitmq/config ADVANCED_CONFIG_FILE=${SYS_PREFIX}/etc/rabbitmq/advanced -SCHEMA_DIR=${SYS_PREFIX}/etc/rabbitmq/schema +SCHEMA_DIR=${SYS_PREFIX}/var/lib/rabbitmq/schema PLUGINS_DIR="${RABBITMQ_HOME}/plugins" From 868176d779361e5235130038b09106c0c801453d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Wed, 13 Apr 2016 12:50:25 +0200 Subject: [PATCH 061/174] rabbit_policy: Forward exit signals update_policies() transaction Thus, any transaction abort are properly forwarded. This fixes a `function_clause` because the code called `update_exchange()` or `update_queue()` with the caught exit signal. Fixes #744. 
[#117522069] --- src/rabbit_policy.erl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/rabbit_policy.erl b/src/rabbit_policy.erl index 1f7e521dfdde..c29783468957 100644 --- a/src/rabbit_policy.erl +++ b/src/rabbit_policy.erl @@ -242,6 +242,8 @@ update_policies(VHost) -> fun() -> [mnesia:lock({table, T}, write) || T <- Tabs], %% [1] case catch list(VHost) of + {'EXIT', Exit} -> + exit(Exit); {error, {no_such_vhost, _}} -> ok; %% [2] Policies -> From bac2dac787b4ff1c58fae3bdde7d53eea6a0e94c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Mon, 18 Apr 2016 18:07:43 +0200 Subject: [PATCH 062/174] rabbit_policy: Fix transaction return value in udpate_policies() If a vhost is removed while policies are being updated, the transaction returned `ok` while the calling code expects a tuple. Fixes #755. [#117522069] --- src/rabbit_policy.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_policy.erl b/src/rabbit_policy.erl index c29783468957..a66f353b3f98 100644 --- a/src/rabbit_policy.erl +++ b/src/rabbit_policy.erl @@ -245,7 +245,7 @@ update_policies(VHost) -> {'EXIT', Exit} -> exit(Exit); {error, {no_such_vhost, _}} -> - ok; %% [2] + {[], []}; %% [2] Policies -> {[update_exchange(X, Policies) || X <- rabbit_exchange:list(VHost)], From 49b8a4014af62ce86063ac6f4595a96175971c47 Mon Sep 17 00:00:00 2001 From: sylvainhubsch Date: Mon, 18 Apr 2016 14:55:20 -0700 Subject: [PATCH 063/174] Match on types deleted and shortcuts algo --- src/rabbit_exchange_type_headers.erl | 52 ++++++++++++++++++---------- 1 file changed, 34 insertions(+), 18 deletions(-) diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index fe344ba86e35..444d507c7e59 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -85,35 +85,51 @@ headers_match(Args, Data) -> MK = parse_x_match(rabbit_misc:table_lookup(Args, <<"x-match">>)), headers_match(Args, Data, true, 
false, MK). -headers_match([], _Data, AllMatch, _AnyMatch, all) -> - AllMatch; -headers_match([], _Data, _AllMatch, AnyMatch, any) -> - AnyMatch; +% A bit less horrendous algorithm :) +headers_match(_, _, false, _, all) -> false; +headers_match(_, _, _, true, any) -> true; + +% No more bindings, return current state +headers_match([], _Data, AllMatch, _AnyMatch, all) -> AllMatch; +headers_match([], _Data, _AllMatch, AnyMatch, any) -> AnyMatch; + +% Delete bindings starting with x- headers_match([{<<"x-", _/binary>>, _PT, _PV} | PRest], Data, AllMatch, AnyMatch, MatchKind) -> headers_match(PRest, Data, AllMatch, AnyMatch, MatchKind); + +% No more data, but still bindings, false with all headers_match(_Pattern, [], _AllMatch, AnyMatch, MatchKind) -> headers_match([], [], false, AnyMatch, MatchKind); + +% Data key header not in binding, go next data headers_match(Pattern = [{PK, _PT, _PV} | _], [{DK, _DT, _DV} | DRest], AllMatch, AnyMatch, MatchKind) when PK > DK -> headers_match(Pattern, DRest, AllMatch, AnyMatch, MatchKind); + +% Binding key header not in data, false with all, go next binding headers_match([{PK, _PT, _PV} | PRest], Data = [{DK, _DT, _DV} | _], _AllMatch, AnyMatch, MatchKind) when PK < DK -> headers_match(PRest, Data, false, AnyMatch, MatchKind); -headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest], - AllMatch, AnyMatch, MatchKind) when PK == DK -> - {AllMatch1, AnyMatch1} = - case rabbit_misc:type_class(PT) == rabbit_misc:type_class(DT) of - %% It's not properly specified, but a "no value" in a - %% pattern field is supposed to mean simple presence of - %% the corresponding data field. I've interpreted that to - %% mean a type of "void" for the pattern field. - _ when PT == void -> {AllMatch, true}; - false -> {false, AnyMatch}; - _ when PV == DV -> {AllMatch, true}; - _ -> {false, AnyMatch} - end, - headers_match(PRest, DRest, AllMatch1, AnyMatch1, MatchKind). 
+ +%% It's not properly specified, but a "no value" in a +%% pattern field is supposed to mean simple presence of +%% the corresponding data field. I've interpreted that to +%% mean a type of "void" for the pattern field. +headers_match([{PK, void, _PV} | PRest], [{DK, _DT, _DV} | DRest], + AllMatch, _AnyMatch, MatchKind) when PK == DK -> + headers_match(PRest, DRest, AllMatch, true, MatchKind); + +% Complete match, true with any, go next +headers_match([{PK, _PT, PV} | PRest], [{DK, _DT, DV} | DRest], + AllMatch, _AnyMatch, MatchKind) when PK == DK andalso PV == DV -> + headers_match(PRest, DRest, AllMatch, true, MatchKind); + +% Value does not match, false with all, go next +headers_match([{PK, _PT, _PV} | PRest], [{DK, _DT, _DV} | DRest], + _AllMatch, AnyMatch, MatchKind) when PK == DK -> + headers_match(PRest, DRest, false, AnyMatch, MatchKind). + validate(_X) -> ok. create(_Tx, _X) -> ok. From d7b45c28b18298a808b2c3c245366fd82c263ccd Mon Sep 17 00:00:00 2001 From: sylvainhubsch Date: Tue, 19 Apr 2016 00:46:07 -0700 Subject: [PATCH 064/174] Compare values only on headers match --- src/rabbit_exchange_type_headers.erl | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index fe344ba86e35..60825086f585 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -100,20 +100,15 @@ headers_match(Pattern = [{PK, _PT, _PV} | _], [{DK, _DT, _DV} | DRest], headers_match([{PK, _PT, _PV} | PRest], Data = [{DK, _DT, _DV} | _], _AllMatch, AnyMatch, MatchKind) when PK < DK -> headers_match(PRest, Data, false, AnyMatch, MatchKind); -headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest], - AllMatch, AnyMatch, MatchKind) when PK == DK -> - {AllMatch1, AnyMatch1} = - case rabbit_misc:type_class(PT) == rabbit_misc:type_class(DT) of - %% It's not properly specified, but a "no value" in a - %% pattern field is supposed to 
mean simple presence of - %% the corresponding data field. I've interpreted that to - %% mean a type of "void" for the pattern field. - _ when PT == void -> {AllMatch, true}; - false -> {false, AnyMatch}; - _ when PV == DV -> {AllMatch, true}; - _ -> {false, AnyMatch} - end, - headers_match(PRest, DRest, AllMatch1, AnyMatch1, MatchKind). +headers_match([{PK, void, _PV} | PRest], [{DK, _DT, _DV} | DRest], + AllMatch, _AnyMatch, MatchKind) when PK == DK -> + headers_match(PRest, DRest, AllMatch, true, MatchKind); +headers_match([{PK, _PT, PV} | PRest], [{DK, _DT, DV} | DRest], + AllMatch, _AnyMatch, MatchKind) when PK == DK andalso PV == DV -> + headers_match(PRest, DRest, AllMatch, true, MatchKind); +headers_match([{PK, _PT, _PV} | PRest], [{DK, _DT, _DV} | DRest], + _AllMatch, AnyMatch, MatchKind) when PK == DK -> + headers_match(PRest, DRest, false, AnyMatch, MatchKind). validate(_X) -> ok. create(_Tx, _X) -> ok. From 8b9a54c5cae6541dadef021f35abc50b84b6cf96 Mon Sep 17 00:00:00 2001 From: sylvainhubsch Date: Tue, 19 Apr 2016 00:52:30 -0700 Subject: [PATCH 065/174] Stop match earlier, and added some comments --- src/rabbit_exchange_type_headers.erl | 29 ++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index 60825086f585..444d507c7e59 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -85,31 +85,52 @@ headers_match(Args, Data) -> MK = parse_x_match(rabbit_misc:table_lookup(Args, <<"x-match">>)), headers_match(Args, Data, true, false, MK). 
-headers_match([], _Data, AllMatch, _AnyMatch, all) -> - AllMatch; -headers_match([], _Data, _AllMatch, AnyMatch, any) -> - AnyMatch; +% A bit less horrendous algorithm :) +headers_match(_, _, false, _, all) -> false; +headers_match(_, _, _, true, any) -> true; + +% No more bindings, return current state +headers_match([], _Data, AllMatch, _AnyMatch, all) -> AllMatch; +headers_match([], _Data, _AllMatch, AnyMatch, any) -> AnyMatch; + +% Delete bindings starting with x- headers_match([{<<"x-", _/binary>>, _PT, _PV} | PRest], Data, AllMatch, AnyMatch, MatchKind) -> headers_match(PRest, Data, AllMatch, AnyMatch, MatchKind); + +% No more data, but still bindings, false with all headers_match(_Pattern, [], _AllMatch, AnyMatch, MatchKind) -> headers_match([], [], false, AnyMatch, MatchKind); + +% Data key header not in binding, go next data headers_match(Pattern = [{PK, _PT, _PV} | _], [{DK, _DT, _DV} | DRest], AllMatch, AnyMatch, MatchKind) when PK > DK -> headers_match(Pattern, DRest, AllMatch, AnyMatch, MatchKind); + +% Binding key header not in data, false with all, go next binding headers_match([{PK, _PT, _PV} | PRest], Data = [{DK, _DT, _DV} | _], _AllMatch, AnyMatch, MatchKind) when PK < DK -> headers_match(PRest, Data, false, AnyMatch, MatchKind); + +%% It's not properly specified, but a "no value" in a +%% pattern field is supposed to mean simple presence of +%% the corresponding data field. I've interpreted that to +%% mean a type of "void" for the pattern field. 
headers_match([{PK, void, _PV} | PRest], [{DK, _DT, _DV} | DRest], AllMatch, _AnyMatch, MatchKind) when PK == DK -> headers_match(PRest, DRest, AllMatch, true, MatchKind); + +% Complete match, true with any, go next headers_match([{PK, _PT, PV} | PRest], [{DK, _DT, DV} | DRest], AllMatch, _AnyMatch, MatchKind) when PK == DK andalso PV == DV -> headers_match(PRest, DRest, AllMatch, true, MatchKind); + +% Value does not match, false with all, go next headers_match([{PK, _PT, _PV} | PRest], [{DK, _DT, _DV} | DRest], _AllMatch, AnyMatch, MatchKind) when PK == DK -> headers_match(PRest, DRest, false, AnyMatch, MatchKind). + validate(_X) -> ok. create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. From c0450138586379a9f8e5554e15a8fc5dde6aece3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Tue, 19 Apr 2016 16:08:32 +0200 Subject: [PATCH 066/174] rabbit_policy: Fix case clause for missing vhost If the vhost is missing, `rabbit_vhost:assert(VHost)` throws `{error, {no_such_vhost, _}}` which is caught by `mnesia:async_dirty/1` and wrapped inside a tuple which is passed to `exit()`. Therefore, the catch in the transaction function returns: `{'EXIT', {throw, {error, {no_such_vhost, _}}}}` not: `{error, {no_such_vhost, _}}` Fixes #759. 
[#117522069] --- src/rabbit_policy.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_policy.erl b/src/rabbit_policy.erl index a66f353b3f98..d04551043e6a 100644 --- a/src/rabbit_policy.erl +++ b/src/rabbit_policy.erl @@ -242,10 +242,10 @@ update_policies(VHost) -> fun() -> [mnesia:lock({table, T}, write) || T <- Tabs], %% [1] case catch list(VHost) of + {'EXIT', {throw, {error, {no_such_vhost, _}}}} -> + {[], []}; %% [2] {'EXIT', Exit} -> exit(Exit); - {error, {no_such_vhost, _}} -> - {[], []}; %% [2] Policies -> {[update_exchange(X, Policies) || X <- rabbit_exchange:list(VHost)], From bc7b5c667a22ea3fa9e60b5fffd48756b27de843 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 20 Apr 2016 14:02:07 +0200 Subject: [PATCH 067/174] Stop process when rabbit is running but is not connected to master. It's should goes down due to avoid split brain. Related Fuel bug: https://bugs.launchpad.net/fuel/+bug/1541471 Signed-off-by: Bogdan Dobrelya Co-authored-by: Maciej Relewicz --- scripts/rabbitmq-server-ha.ocf | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/rabbitmq-server-ha.ocf b/scripts/rabbitmq-server-ha.ocf index 301f7a1fc7ca..06eeb508374d 100755 --- a/scripts/rabbitmq-server-ha.ocf +++ b/scripts/rabbitmq-server-ha.ocf @@ -1463,6 +1463,7 @@ get_monitor() { # Rabbit is running but is not connected to master # Failing to avoid split brain ocf_log err "${LH} rabbit node is running out of the cluster" + stop_server_process rc=$OCF_ERR_GENERIC fi fi From bb777dec813e8480c3f301de996a0a32aae308e5 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Thu, 21 Apr 2016 15:05:16 +0100 Subject: [PATCH 068/174] Do not move rabbitmq.conf to rabbitmq-env.conf during RPM and DEB install --- packaging/RPMS/Fedora/rabbitmq-server.spec | 6 +----- packaging/debs/Debian/debian/postinst | 7 +------ 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/packaging/RPMS/Fedora/rabbitmq-server.spec 
b/packaging/RPMS/Fedora/rabbitmq-server.spec index b6715d430a6d..90f16a59d83e 100644 --- a/packaging/RPMS/Fedora/rabbitmq-server.spec +++ b/packaging/RPMS/Fedora/rabbitmq-server.spec @@ -91,11 +91,7 @@ fi %post /sbin/chkconfig --add %{name} -if [ -f %{_sysconfdir}/rabbitmq/rabbitmq.conf ] && [ ! -f %{_sysconfdir}/rabbitmq/rabbitmq-env.conf ]; then - mv %{_sysconfdir}/rabbitmq/rabbitmq.conf %{_sysconfdir}/rabbitmq/rabbitmq-env.conf -else - touch %{_sysconfdir}/rabbitmq/rabbitmq-env.conf -fi +touch %{_sysconfdir}/rabbitmq/rabbitmq-env.conf chmod -R o-rwx,g-w %{_localstatedir}/lib/rabbitmq/mnesia %preun diff --git a/packaging/debs/Debian/debian/postinst b/packaging/debs/Debian/debian/postinst index 2439612c428d..2a4bc4391fe5 100644 --- a/packaging/debs/Debian/debian/postinst +++ b/packaging/debs/Debian/debian/postinst @@ -37,12 +37,7 @@ chmod -R o-rwx,g-w /var/lib/rabbitmq/mnesia case "$1" in configure) - if [ -f /etc/rabbitmq/rabbitmq.conf ] && \ - [ ! -f /etc/rabbitmq/rabbitmq-env.conf ]; then - mv /etc/rabbitmq/rabbitmq.conf /etc/rabbitmq/rabbitmq-env.conf - else - touch /etc/rabbitmq/rabbitmq-env.conf - fi + touch /etc/rabbitmq/rabbitmq-env.conf ;; abort-upgrade|abort-remove|abort-deconfigure) From 74cafa3c57ab1899314a811798e65e5d7416bcbc Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Mon, 25 Apr 2016 17:07:48 +0100 Subject: [PATCH 069/174] Do not create rabbitmq-env.conf file during installation --- packaging/RPMS/Fedora/rabbitmq-server.spec | 1 - packaging/debs/Debian/debian/postinst | 1 - 2 files changed, 2 deletions(-) diff --git a/packaging/RPMS/Fedora/rabbitmq-server.spec b/packaging/RPMS/Fedora/rabbitmq-server.spec index 90f16a59d83e..04676b08028e 100644 --- a/packaging/RPMS/Fedora/rabbitmq-server.spec +++ b/packaging/RPMS/Fedora/rabbitmq-server.spec @@ -91,7 +91,6 @@ fi %post /sbin/chkconfig --add %{name} -touch %{_sysconfdir}/rabbitmq/rabbitmq-env.conf chmod -R o-rwx,g-w %{_localstatedir}/lib/rabbitmq/mnesia %preun diff --git 
a/packaging/debs/Debian/debian/postinst b/packaging/debs/Debian/debian/postinst index 2a4bc4391fe5..f49f2aa40ffd 100644 --- a/packaging/debs/Debian/debian/postinst +++ b/packaging/debs/Debian/debian/postinst @@ -37,7 +37,6 @@ chmod -R o-rwx,g-w /var/lib/rabbitmq/mnesia case "$1" in configure) - touch /etc/rabbitmq/rabbitmq-env.conf ;; abort-upgrade|abort-remove|abort-deconfigure) From 4eaa46ef0ea1e1777c86a5ddc93e0cf447a3f448 Mon Sep 17 00:00:00 2001 From: Alexey Lebedeff Date: Fri, 22 Apr 2016 17:06:19 +0300 Subject: [PATCH 070/174] Don't use hardcoded path to `df` Search the `PATH` instead, because e.g. NixOS has no `/bin/df`. --- src/rabbit_disk_monitor.erl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/rabbit_disk_monitor.erl b/src/rabbit_disk_monitor.erl index 88a8096fd483..a56b92b50116 100644 --- a/src/rabbit_disk_monitor.erl +++ b/src/rabbit_disk_monitor.erl @@ -213,9 +213,11 @@ get_disk_free(Dir) -> get_disk_free(Dir, {unix, Sun}) when Sun =:= sunos; Sun =:= sunos4; Sun =:= solaris -> - parse_free_unix(rabbit_misc:os_cmd("/usr/bin/df -k " ++ Dir)); + Df = os:find_executable("df"), + parse_free_unix(rabbit_misc:os_cmd(Df ++ " -k " ++ Dir)); get_disk_free(Dir, {unix, _}) -> - parse_free_unix(rabbit_misc:os_cmd("/bin/df -kP " ++ Dir)); + Df = os:find_executable("df"), + parse_free_unix(rabbit_misc:os_cmd(Df ++ " -kP " ++ Dir)); get_disk_free(Dir, {win32, _}) -> parse_free_win32(rabbit_misc:os_cmd("dir /-C /W \"" ++ Dir ++ "\"")). From 5fb86319c444d459dee3606d56fbde335382f26e Mon Sep 17 00:00:00 2001 From: Alexey Lebedeff Date: Fri, 22 Apr 2016 16:52:42 +0300 Subject: [PATCH 071/174] Improve notifications about damaged I/O subsystem Follow-up to https://github.com/rabbitmq/rabbitmq-server/pull/747 It's the best thing that can be done. Without `exit` anything inside rabbit could hang - most prone to it is the `start_app` action. 
Crashing loudly will be helpful, because during 2016 there were at least 2 production investigations about that issue. And here is the script that I was using for reproduction: https://gist.github.com/binarin/edd4102b7262d53e2cf09c76d5ca8e9e --- src/rabbit_log.erl | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index ed73a293ca33..5b5468cab749 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -96,7 +96,7 @@ with_local_io(Fun) -> Node = node(), case node(GL) of Node -> Fun(); - _ -> set_group_leader_to_user(), + _ -> set_group_leader_to_user_safely(whereis(user)), try Fun() after @@ -104,10 +104,12 @@ with_local_io(Fun) -> end end. -set_group_leader_to_user() -> - case whereis(user) of - undefined -> - warning("the 'user' I/O process has terminated, some features will fail until Erlang VM is restarted"); - User -> - group_leader(User, self()) - end. +set_group_leader_to_user_safely(undefined) -> + handle_damaged_io_system(); +set_group_leader_to_user_safely(User) when is_pid(User) -> + group_leader(User, self()). + +handle_damaged_io_system() -> + Msg = "Erlang VM I/O system is damaged, restart needed~n", + io:format(standard_error, Msg, []), + exit(erlang_vm_restart_needed). 
From 28a814b572d624a918bc1e66e4e3d9e7a019faf1 Mon Sep 17 00:00:00 2001 From: Gabriele Santomaggio Date: Wed, 27 Apr 2016 11:23:13 +0200 Subject: [PATCH 072/174] warning message during partition --- src/rabbit_node_monitor.erl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/rabbit_node_monitor.erl b/src/rabbit_node_monitor.erl index 6f41836b98ee..43e6f6e4c74e 100644 --- a/src/rabbit_node_monitor.erl +++ b/src/rabbit_node_monitor.erl @@ -414,7 +414,11 @@ handle_cast({check_partial_partition, Node, Rep, NodeGUID, MyGUID, RepGUID}, fun () -> case rpc:call(Node, rabbit, is_running, []) of {badrpc, _} -> ok; - _ -> cast(Rep, {partial_partition, + _ -> + rabbit_log:warning("Unexpected running node:" + " ~p is still running ~n", + [Node]), + cast(Rep, {partial_partition, Node, node(), RepGUID}) end end); From e6c5dcb1189740b850a3cc32b6233739cce6b05e Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Mon, 25 Apr 2016 17:25:29 +0100 Subject: [PATCH 073/174] Ignore non-array x-dead header --- src/rabbit_dead_letter.erl | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/rabbit_dead_letter.erl b/src/rabbit_dead_letter.erl index 252405d62b03..07cb954b2025 100644 --- a/src/rabbit_dead_letter.erl +++ b/src/rabbit_dead_letter.erl @@ -139,7 +139,16 @@ update_x_death_header(Info, Headers) -> end, rabbit_misc:set_table_value( Headers, <<"x-death">>, array, - [{table, rabbit_misc:sort_field_table(Info1)} | Others]) + [{table, rabbit_misc:sort_field_table(Info1)} | Others]); + {<<"x-death">>, InvalidType, Header} -> + rabbit_log:notice("Invalid x-death header type: ~p." + " Ignoring header ~p~n", + [InvalidType, Header]), + %% If x-death field type is invalid it is ignored + %% and new array is created + rabbit_misc:set_table_value( + Headers, <<"x-death">>, array, + [{table, [{<<"count">>, long, 1} | Info]}]) end. 
ensure_xdeath_event_count({table, Info}, InitialVal) when InitialVal >= 1 -> From cb1fc3b7c588f452b8d52bfbcc2df3d96cd22486 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Wed, 27 Apr 2016 11:02:35 +0100 Subject: [PATCH 074/174] Log invalid x-death header as warning --- src/rabbit_dead_letter.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit_dead_letter.erl b/src/rabbit_dead_letter.erl index 07cb954b2025..479c8a316f3d 100644 --- a/src/rabbit_dead_letter.erl +++ b/src/rabbit_dead_letter.erl @@ -141,9 +141,9 @@ update_x_death_header(Info, Headers) -> Headers, <<"x-death">>, array, [{table, rabbit_misc:sort_field_table(Info1)} | Others]); {<<"x-death">>, InvalidType, Header} -> - rabbit_log:notice("Invalid x-death header type: ~p." - " Ignoring header ~p~n", - [InvalidType, Header]), + rabbit_log:warning("Death Invalid x-death header type: ~p." + " Ignoring header ~p~n", + [InvalidType, Header]), %% If x-death field type is invalid it is ignored %% and new array is created rabbit_misc:set_table_value( From 314c975b8dd9c202f10bfd1c86f7374af9cddab4 Mon Sep 17 00:00:00 2001 From: Gabriele Santomaggio Date: Wed, 27 Apr 2016 16:20:34 +0200 Subject: [PATCH 075/174] modified the message --- src/rabbit_node_monitor.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rabbit_node_monitor.erl b/src/rabbit_node_monitor.erl index 43e6f6e4c74e..976f4a4b2f23 100644 --- a/src/rabbit_node_monitor.erl +++ b/src/rabbit_node_monitor.erl @@ -415,8 +415,9 @@ handle_cast({check_partial_partition, Node, Rep, NodeGUID, MyGUID, RepGUID}, case rpc:call(Node, rabbit, is_running, []) of {badrpc, _} -> ok; _ -> - rabbit_log:warning("Unexpected running node:" - " ~p is still running ~n", + rabbit_log:warning("Received a 'DOWN' message" + " from ~p but still can" + " communicate with it ~n", [Node]), cast(Rep, {partial_partition, Node, node(), RepGUID}) From 702956f2c69cba624146180f4c715912012392d2 Mon Sep 17 00:00:00 2001 From: 
Michael Klishin Date: Wed, 27 Apr 2016 12:19:59 -0500 Subject: [PATCH 076/174] Wording --- src/rabbit_dead_letter.erl | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/rabbit_dead_letter.erl b/src/rabbit_dead_letter.erl index 479c8a316f3d..8d426f072a1d 100644 --- a/src/rabbit_dead_letter.erl +++ b/src/rabbit_dead_letter.erl @@ -141,11 +141,14 @@ update_x_death_header(Info, Headers) -> Headers, <<"x-death">>, array, [{table, rabbit_misc:sort_field_table(Info1)} | Others]); {<<"x-death">>, InvalidType, Header} -> - rabbit_log:warning("Death Invalid x-death header type: ~p." - " Ignoring header ~p~n", + rabbit_log:warning("Message has invalid x-death header (type: ~p)." + " Resetting header ~p~n", [InvalidType, Header]), - %% If x-death field type is invalid it is ignored - %% and new array is created + %% if x-death is something other than an array (list) + %% then we reset it: this happens when some clients consume + %% a message and re-publish is, converting header values + %% to strings, intentionally or not. + %% See rabbitmq/rabbitmq-server#767 for details. rabbit_misc:set_table_value( Headers, <<"x-death">>, array, [{table, [{<<"count">>, long, 1} | Info]}]) From 77c2d0a3d1798e7697315bd2df9215bbe28be2bf Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Fri, 22 Apr 2016 14:35:02 +0100 Subject: [PATCH 077/174] set broker requirements during package gen References #735. 
--- Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 924160849672..44273b7a5c99 100644 --- a/Makefile +++ b/Makefile @@ -275,7 +275,10 @@ $(SOURCE_DIST): $(ERLANG_MK_RECURSIVE_DEPS_LIST) $(verbose) cat packaging/common/LICENSE.tail >> $@/LICENSE $(verbose) find $@/deps/licensing -name 'LICENSE-*' -exec cp '{}' $@ \; $(verbose) for file in $$(find $@ -name '*.app.src'); do \ - sed -E -i.bak -e 's/[{]vsn[[:blank:]]*,[[:blank:]]*""[[:blank:]]*}/{vsn, "$(VERSION)"}/' $$file; \ + sed -E -i.bak \ + -e 's/[{]vsn[[:blank:]]*,[[:blank:]]*""[[:blank:]]*}/{vsn, "$(VERSION)"}/' \ + -e 's/[{]broker_version_requirements[[:blank:]]*,[[:blank:]]*\[\][[:blank:]]*}/{broker_version_requirements, ["$(VERSION)"]}/' \ + $$file; \ rm $$file.bak; \ done $(verbose) echo "$(PROJECT) $$(git rev-parse HEAD) $$(git describe --tags --exact-match 2>/dev/null || git symbolic-ref -q --short HEAD)" > $@/git-revisions.txt From 2c00a20c1253e829b0dba7fd5cb94f5698f04dd0 Mon Sep 17 00:00:00 2001 From: Gabriele Santomaggio Date: Wed, 27 Apr 2016 17:49:17 +0200 Subject: [PATCH 078/174] reconfigure ERLANG_HOME during the update The `un.StrContains` macro is now useless, so we remove it. Fixes #690. --- packaging/windows-exe/rabbitmq_nsi.in | 69 +++------------------------ 1 file changed, 6 insertions(+), 63 deletions(-) diff --git a/packaging/windows-exe/rabbitmq_nsi.in b/packaging/windows-exe/rabbitmq_nsi.in index e66d2a99eda7..4c47e8662818 100644 --- a/packaging/windows-exe/rabbitmq_nsi.in +++ b/packaging/windows-exe/rabbitmq_nsi.in @@ -13,57 +13,6 @@ !define MUI_FINISHPAGE_NOAUTOCLOSE !define MUI_UNFINISHPAGE_NOAUTOCLOSE -;-------------------------------- -; Third-party functions -; StrContains -; This function does a case sensitive searches for an occurrence of a substring in a string. -; It returns the substring if it is found. -; Otherwise it returns null(""). 
-; Written by kenglish_hi -; Adapted from StrReplace written by dandaman32 - - -Var STR_HAYSTACK -Var STR_NEEDLE -Var STR_CONTAINS_VAR_1 -Var STR_CONTAINS_VAR_2 -Var STR_CONTAINS_VAR_3 -Var STR_CONTAINS_VAR_4 -Var STR_RETURN_VAR - -Function un.StrContains - Exch $STR_NEEDLE - Exch 1 - Exch $STR_HAYSTACK - ; Uncomment to debug - ;MessageBox MB_OK 'STR_NEEDLE = $STR_NEEDLE STR_HAYSTACK = $STR_HAYSTACK ' - StrCpy $STR_RETURN_VAR "" - StrCpy $STR_CONTAINS_VAR_1 -1 - StrLen $STR_CONTAINS_VAR_2 $STR_NEEDLE - StrLen $STR_CONTAINS_VAR_4 $STR_HAYSTACK - loop: - IntOp $STR_CONTAINS_VAR_1 $STR_CONTAINS_VAR_1 + 1 - StrCpy $STR_CONTAINS_VAR_3 $STR_HAYSTACK $STR_CONTAINS_VAR_2 $STR_CONTAINS_VAR_1 - StrCmp $STR_CONTAINS_VAR_3 $STR_NEEDLE found - StrCmp $STR_CONTAINS_VAR_1 $STR_CONTAINS_VAR_4 done - Goto loop - found: - StrCpy $STR_RETURN_VAR $STR_NEEDLE - Goto done - done: - Pop $STR_NEEDLE ;Prevent "invalid opcode" errors and keep the - Exch $STR_RETURN_VAR -FunctionEnd - -!macro _un.StrContainsConstructor OUT NEEDLE HAYSTACK - Push `${HAYSTACK}` - Push `${NEEDLE}` - Call un.StrContains - Pop `${OUT}` -!macroend - -!define un.StrContains '!insertmacro "_un.StrContainsConstructor"' - ;-------------------------------- ; The name of the installer @@ -214,12 +163,6 @@ LangString DESC_RabbitStartMenu ${LANG_ENGLISH} "Add some useful links to the st Section "Uninstall" - ; Check if reinstall will occur immediately - don't remove ERLANG_HOME - Var /GLOBAL REINSTALLFLAG - Var /GLOBAL ISREINSTALL - ${GetParameters} $REINSTALLFLAG - ${un.StrContains} $ISREINSTALL "reinstall" $REINSTALLFLAG - ; Remove registry keys DeleteRegKey HKLM ${uninstall} DeleteRegKey HKLM "SOFTWARE\VMware, Inc.\RabbitMQ Server" @@ -239,12 +182,9 @@ Section "Uninstall" ; Remove start menu items RMDir /r "$SMPROGRAMS\RabbitMQ Server" - ; If reinstalling immediately (e.g. 
program update) don't remove ERLANG_HOME environment variable - ${If} $ISREINSTALL == "" - DeleteRegValue ${env_hklm} ERLANG_HOME - SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000 - ${EndIf} - + DeleteRegValue ${env_hklm} ERLANG_HOME + SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000 + SectionEnd ;-------------------------------- @@ -277,6 +217,9 @@ Function .onInit ExecWait '"$INSTDIR\uninstall.exe" /S _?=$INSTDIR' Delete "$INSTDIR\uninstall.exe" RMDir "$INSTDIR" + ; the unistaller removes the ERLANG_HOME. + ; called again since this is an update + Call findErlang ${EndIf} FunctionEnd From 6f11f14d183050b8804c0adf304bf7468586296e Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Thu, 28 Apr 2016 18:09:01 +0100 Subject: [PATCH 079/174] Drop ACKs of messages from lost members of the ring * when partial partitions happen, inconsistent views can take place where both nodes in partial partition see the live node as their left (A -> C and B -> C), and override each other view in mnesia. Thus, when C tries to compare the ACKs finds missing elements on A or B data. We'll drop these as they won't be answered because the origin node is going into pause_minority and stops. --- src/gm.erl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/gm.erl b/src/gm.erl index 199cf7c4de16..1e4168c0e69a 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -1338,7 +1338,11 @@ find_common(A, B, Common) -> {{{value, Val}, A1}, {{value, Val}, B1}} -> find_common(A1, B1, queue:in(Val, Common)); {{empty, _A}, _} -> - {Common, B} + {Common, B}; + {_, {_, B1}} -> + find_common(A, B1, Common); + {{_, A1}, _} -> + find_common(A1, B, Common) end. 
From f77fcd51442e75711ee753d14b833f99122a93af Mon Sep 17 00:00:00 2001 From: Gabriele Santomaggio Date: Wed, 27 Apr 2016 11:23:13 +0200 Subject: [PATCH 080/174] warning message during partition --- src/rabbit_node_monitor.erl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/rabbit_node_monitor.erl b/src/rabbit_node_monitor.erl index 6f41836b98ee..43e6f6e4c74e 100644 --- a/src/rabbit_node_monitor.erl +++ b/src/rabbit_node_monitor.erl @@ -414,7 +414,11 @@ handle_cast({check_partial_partition, Node, Rep, NodeGUID, MyGUID, RepGUID}, fun () -> case rpc:call(Node, rabbit, is_running, []) of {badrpc, _} -> ok; - _ -> cast(Rep, {partial_partition, + _ -> + rabbit_log:warning("Unexpected running node:" + " ~p is still running ~n", + [Node]), + cast(Rep, {partial_partition, Node, node(), RepGUID}) end end); From 69312f8e0eb4b6315a0ae2604cb8492df7043401 Mon Sep 17 00:00:00 2001 From: Gabriele Santomaggio Date: Wed, 27 Apr 2016 16:20:34 +0200 Subject: [PATCH 081/174] modified the message --- src/rabbit_node_monitor.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rabbit_node_monitor.erl b/src/rabbit_node_monitor.erl index 43e6f6e4c74e..976f4a4b2f23 100644 --- a/src/rabbit_node_monitor.erl +++ b/src/rabbit_node_monitor.erl @@ -415,8 +415,9 @@ handle_cast({check_partial_partition, Node, Rep, NodeGUID, MyGUID, RepGUID}, case rpc:call(Node, rabbit, is_running, []) of {badrpc, _} -> ok; _ -> - rabbit_log:warning("Unexpected running node:" - " ~p is still running ~n", + rabbit_log:warning("Received a 'DOWN' message" + " from ~p but still can" + " communicate with it ~n", [Node]), cast(Rep, {partial_partition, Node, node(), RepGUID}) From 7f348db0dffb443baf4468aa038d3888819b8132 Mon Sep 17 00:00:00 2001 From: Alexey Lebedeff Date: Tue, 10 May 2016 11:01:37 +0300 Subject: [PATCH 082/174] Don't scare ops with empty list of failed plugins --- src/rabbit_plugins.erl | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) 
diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index 8f7319182edf..47574a9d55cb 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -253,8 +253,7 @@ prepare_plugins(Enabled) -> Wanted = dependencies(false, Enabled, AllPlugins), WantedPlugins = lookup_plugins(Wanted, AllPlugins), {ValidPlugins, Problems} = validate_plugins(WantedPlugins), - %% TODO: error message formatting - rabbit_log:warning(format_invalid_plugins(Problems)), + maybe_warn_about_invalid_plugins(Problems), case filelib:ensure_dir(ExpandDir ++ "/") of ok -> ok; {error, E2} -> throw({error, {cannot_create_plugins_expand_dir, @@ -266,6 +265,13 @@ prepare_plugins(Enabled) -> PluginAppDescPath <- filelib:wildcard(ExpandDir ++ "/*/ebin/*.app")], Wanted. +maybe_warn_about_invalid_plugins([]) -> + ok; +maybe_warn_about_invalid_plugins(InvalidPlugins) -> + %% TODO: error message formatting + rabbit_log:warning(format_invalid_plugins(InvalidPlugins)). + + format_invalid_plugins(InvalidPlugins) -> lists:flatten(["Failed to enable some plugins: \r\n" | [format_invalid_plugin(Plugin) From a004f197905b463a2236c95f97b72fdbd2774163 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Thu, 12 May 2016 10:34:15 +0100 Subject: [PATCH 083/174] Bump minimal erlang version to 18.3 --- .travis.yml | 1 + src/file_handle_cache.erl | 14 +++++++------- src/file_handle_cache_stats.erl | 6 +++--- src/gm.erl | 4 ++-- src/pg2_fixed.erl | 4 ++-- src/rabbit.erl | 2 +- src/rabbit_amqqueue_process.erl | 8 ++++---- src/rabbit_dead_letter.erl | 2 +- src/rabbit_error_logger.erl | 2 +- src/rabbit_hipe.erl | 6 +++--- src/rabbit_mirror_queue_mode_exactly.erl | 4 ++-- src/rabbit_mirror_queue_sync.erl | 8 ++++---- src/rabbit_password.erl | 4 ++-- src/rabbit_queue_consumers.erl | 10 +++++----- src/rabbit_queue_location_random.erl | 2 +- src/rabbit_variable_queue.erl | 6 +++--- 16 files changed, 42 insertions(+), 41 deletions(-) diff --git a/.travis.yml b/.travis.yml index da19ebc30273..54ef56a5e62d 100644 --- 
a/.travis.yml +++ b/.travis.yml @@ -13,6 +13,7 @@ otp_release: - "R16B03-1" - "17.5" - "18.0" + - "18.3" env: matrix: - GROUP=1 GROUP=2 diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index d5f0cbee6f5b..e9754821e8cd 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -537,12 +537,12 @@ clear(Ref) -> end). set_maximum_since_use(MaximumAge) -> - Now = time_compat:monotonic_time(), + Now = erlang:monotonic_time(), case lists:foldl( fun ({{Ref, fhc_handle}, Handle = #handle { hdl = Hdl, last_used_at = Then }}, Rep) -> case Hdl =/= closed andalso - time_compat:convert_time_unit(Now - Then, + erlang:convert_time_unit(Now - Then, native, micro_seconds) >= MaximumAge of @@ -715,7 +715,7 @@ get_or_reopen(RefNewOrReopens) -> {ok, [Handle || {_Ref, Handle} <- OpenHdls]}; {OpenHdls, ClosedHdls} -> Oldest = oldest(get_age_tree(), - fun () -> time_compat:monotonic_time() end), + fun () -> erlang:monotonic_time() end), case gen_server2:call(?SERVER, {open, self(), length(ClosedHdls), Oldest}, infinity) of ok -> @@ -751,7 +751,7 @@ reopen([{Ref, NewOrReopen, Handle = #handle { hdl = closed, end, case prim_file:open(Path, Mode) of {ok, Hdl} -> - Now = time_compat:monotonic_time(), + Now = erlang:monotonic_time(), {{ok, _Offset}, Handle1} = maybe_seek(Offset, reset_read_buffer( Handle#handle{hdl = Hdl, @@ -787,7 +787,7 @@ sort_handles([{Ref, _} | RefHdls], RefHdlsA, [{Ref, Handle} | RefHdlsB], Acc) -> sort_handles(RefHdls, RefHdlsA, RefHdlsB, [Handle | Acc]). put_handle(Ref, Handle = #handle { last_used_at = Then }) -> - Now = time_compat:monotonic_time(), + Now = erlang:monotonic_time(), age_tree_update(Then, Now, Ref), put({Ref, fhc_handle}, Handle #handle { last_used_at = Now }). 
@@ -1429,14 +1429,14 @@ reduce(State = #fhc_state { open_pending = OpenPending, elders = Elders, clients = Clients, timer_ref = TRef }) -> - Now = time_compat:monotonic_time(), + Now = erlang:monotonic_time(), {CStates, Sum, ClientCount} = ets:foldl(fun ({Pid, Eldest}, {CStatesAcc, SumAcc, CountAcc} = Accs) -> [#cstate { pending_closes = PendingCloses, opened = Opened, blocked = Blocked } = CState] = ets:lookup(Clients, Pid), - TimeDiff = time_compat:convert_time_unit( + TimeDiff = erlang:convert_time_unit( Now - Eldest, native, micro_seconds), case Blocked orelse PendingCloses =:= Opened of true -> Accs; diff --git a/src/file_handle_cache_stats.erl b/src/file_handle_cache_stats.erl index ccf1e49662fe..8f368a840537 100644 --- a/src/file_handle_cache_stats.erl +++ b/src/file_handle_cache_stats.erl @@ -59,8 +59,8 @@ get() -> lists:sort(ets:tab2list(?TABLE)). timer_tc(Thunk) -> - T1 = time_compat:monotonic_time(), + T1 = erlang:monotonic_time(), Res = Thunk(), - T2 = time_compat:monotonic_time(), - Diff = time_compat:convert_time_unit(T2 - T1, native, micro_seconds), + T2 = erlang:monotonic_time(), + Diff = erlang:convert_time_unit(T2 - T1, native, micro_seconds), {Diff, Res}. 
diff --git a/src/gm.erl b/src/gm.erl index 1e4168c0e69a..a83d8d1932a3 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -552,8 +552,8 @@ forget_group(GroupName) -> init([GroupName, Module, Args, TxnFun]) -> put(process_name, {?MODULE, GroupName}), _ = random:seed(erlang:phash2([node()]), - time_compat:monotonic_time(), - time_compat:unique_integer()), + erlang:monotonic_time(), + erlang:unique_integer()), Self = make_member(GroupName), gen_server2:cast(self(), join), {ok, #state { self = Self, diff --git a/src/pg2_fixed.erl b/src/pg2_fixed.erl index 222a0bc8497d..73c05819d431 100644 --- a/src/pg2_fixed.erl +++ b/src/pg2_fixed.erl @@ -149,11 +149,11 @@ get_closest_pid(Name) -> case get_members(Name) of [] -> {error, {no_process, Name}}; Members -> - X = time_compat:erlang_system_time(micro_seconds), + X = erlang:system_time(micro_seconds), lists:nth((X rem length(Members))+1, Members) end; Members when is_list(Members) -> - X = time_compat:erlang_system_time(micro_seconds), + X = erlang:system_time(micro_seconds), lists:nth((X rem length(Members))+1, Members); Else -> Else diff --git a/src/rabbit.erl b/src/rabbit.erl index 7816cd53aa98..b1f83729b8ce 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -731,7 +731,7 @@ log_broker_started(Plugins) -> erts_version_check() -> ERTSVer = erlang:system_info(version), - OTPRel = erlang:system_info(otp_release), + OTPRel = rabbit_misc:otp_release(), case rabbit_misc:version_compare(?ERTS_MINIMUM, ERTSVer, lte) of true when ?ERTS_MINIMUM =/= ERTSVer -> ok; diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 6d3bf892b085..3ee14e4f7db6 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -431,7 +431,7 @@ ensure_ttl_timer(undefined, State) -> State; ensure_ttl_timer(Expiry, State = #q{ttl_timer_ref = undefined, args_policy_version = Version}) -> - After = (case Expiry - time_compat:os_system_time(micro_seconds) of + After = (case Expiry - os:system_time(micro_seconds) of 
V when V > 0 -> V + 999; %% always fire later _ -> 0 end) div 1000, @@ -757,7 +757,7 @@ calculate_msg_expiry(#basic_message{content = Content}, TTL) -> {ok, MsgTTL} = rabbit_basic:parse_expiration(Props), case lists:min([TTL, MsgTTL]) of undefined -> undefined; - T -> time_compat:os_system_time(micro_seconds) + T * 1000 + T -> os:system_time(micro_seconds) + T * 1000 end. %% Logically this function should invoke maybe_send_drained/2. @@ -768,7 +768,7 @@ calculate_msg_expiry(#basic_message{content = Content}, TTL) -> drop_expired_msgs(State) -> case is_empty(State) of true -> State; - false -> drop_expired_msgs(time_compat:os_system_time(micro_seconds), + false -> drop_expired_msgs(os:system_time(micro_seconds), State) end. @@ -1358,7 +1358,7 @@ handle_pre_hibernate(State = #q{backing_queue = BQ, State, #q.stats_timer, fun () -> emit_stats(State, [{idle_since, - time_compat:os_system_time(milli_seconds)}, + os:system_time(milli_seconds)}, {consumer_utilisation, ''}]) end), State1 = rabbit_event:stop_stats_timer(State#q{backing_queue_state = BQS3}, diff --git a/src/rabbit_dead_letter.erl b/src/rabbit_dead_letter.erl index 8d426f072a1d..b5182ee2e00f 100644 --- a/src/rabbit_dead_letter.erl +++ b/src/rabbit_dead_letter.erl @@ -53,7 +53,7 @@ make_msg(Msg = #basic_message{content = Content, _ -> {[RK], fun (H) -> lists:keydelete(<<"CC">>, 1, H) end} end, ReasonBin = list_to_binary(atom_to_list(Reason)), - TimeSec = time_compat:os_system_time(seconds), + TimeSec = os:system_time(seconds), PerMsgTTL = per_msg_ttl_header(Content#content.properties), HeadersFun2 = fun (Headers) -> diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl index efe8495299e2..0103d4e50311 100644 --- a/src/rabbit_error_logger.erl +++ b/src/rabbit_error_logger.erl @@ -100,7 +100,7 @@ publish(_Other, _Format, _Data, _State) -> publish1(RoutingKey, Format, Data, LogExch) -> %% 0-9-1 says the timestamp is a "64 bit POSIX timestamp". That's %% second resolution, not millisecond. 
- Timestamp = time_compat:os_system_time(seconds), + Timestamp = os:system_time(seconds), Args = [truncate:term(A, ?LOG_TRUNC) || A <- Data], Headers = [{<<"node">>, longstr, list_to_binary(atom_to_list(node()))}], diff --git a/src/rabbit_hipe.erl b/src/rabbit_hipe.erl index 05b5f3719d1d..cbd9181e6a19 100644 --- a/src/rabbit_hipe.erl +++ b/src/rabbit_hipe.erl @@ -68,7 +68,7 @@ do_hipe_compile(HipeModules) -> Count = length(HipeModules), io:format("~nHiPE compiling: |~s|~n |", [string:copies("-", Count)]), - T1 = time_compat:monotonic_time(), + T1 = erlang:monotonic_time(), %% We use code:get_object_code/1 below to get the beam binary, %% instead of letting hipe get it itself, because hipe:c/{1,2} %% expects the given filename to actually exist on disk: it does not @@ -92,8 +92,8 @@ do_hipe_compile(HipeModules) -> {'DOWN', MRef, process, _, normal} -> ok; {'DOWN', MRef, process, _, Reason} -> exit(Reason) end || {_Pid, MRef} <- PidMRefs], - T2 = time_compat:monotonic_time(), - Duration = time_compat:convert_time_unit(T2 - T1, native, seconds), + T2 = erlang:monotonic_time(), + Duration = erlang:convert_time_unit(T2 - T1, native, seconds), io:format("|~n~nCompiled ~B modules in ~Bs~n", [Count, Duration]), {ok, Count, Duration}. diff --git a/src/rabbit_mirror_queue_mode_exactly.erl b/src/rabbit_mirror_queue_mode_exactly.erl index 4721ad613630..28ed8ca4635a 100644 --- a/src/rabbit_mirror_queue_mode_exactly.erl +++ b/src/rabbit_mirror_queue_mode_exactly.erl @@ -46,8 +46,8 @@ suggested_queue_nodes(Count, MNode, SNodes, _SSNodes, Poss) -> shuffle(L) -> random:seed(erlang:phash2([node()]), - time_compat:monotonic_time(), - time_compat:unique_integer()), + erlang:monotonic_time(), + erlang:unique_integer()), {_, L1} = lists:unzip(lists:keysort(1, [{random:uniform(), N} || N <- L])), L1. 
diff --git a/src/rabbit_mirror_queue_sync.erl b/src/rabbit_mirror_queue_sync.erl index a97a9b50c86a..898aa5abcf46 100644 --- a/src/rabbit_mirror_queue_sync.erl +++ b/src/rabbit_mirror_queue_sync.erl @@ -108,7 +108,7 @@ master_batch_go0(Args, BatchSize, BQ, BQS) -> false -> {cont, Acc1} end end, - FoldAcc = {[], 0, {0, BQ:depth(BQS)}, time_compat:monotonic_time()}, + FoldAcc = {[], 0, {0, BQ:depth(BQS)}, erlang:monotonic_time()}, bq_fold(FoldFun, FoldAcc, Args, BQ, BQS). master_batch_send({Syncer, Ref, Log, HandleInfo, EmitStats, Parent}, @@ -168,12 +168,12 @@ stop_syncer(Syncer, Msg) -> end. maybe_emit_stats(Last, I, EmitStats, Log) -> - Interval = time_compat:convert_time_unit( - time_compat:monotonic_time() - Last, native, micro_seconds), + Interval = erlang:convert_time_unit( + erlang:monotonic_time() - Last, native, micro_seconds), case Interval > ?SYNC_PROGRESS_INTERVAL of true -> EmitStats({syncing, I}), Log("~p messages", [I]), - time_compat:monotonic_time(); + erlang:monotonic_time(); false -> Last end. diff --git a/src/rabbit_password.erl b/src/rabbit_password.erl index d5b0945de977..8d5cf8d69e24 100644 --- a/src/rabbit_password.erl +++ b/src/rabbit_password.erl @@ -36,8 +36,8 @@ hash(HashingMod, Cleartext) -> generate_salt() -> random:seed(erlang:phash2([node()]), - time_compat:monotonic_time(), - time_compat:unique_integer()), + erlang:monotonic_time(), + erlang:unique_integer()), Salt = random:uniform(16#ffffffff), <>. diff --git a/src/rabbit_queue_consumers.erl b/src/rabbit_queue_consumers.erl index 5b5c9b30744a..6200f9d2c12d 100644 --- a/src/rabbit_queue_consumers.erl +++ b/src/rabbit_queue_consumers.erl @@ -100,7 +100,7 @@ new() -> #state{consumers = priority_queue:new(), use = {active, - time_compat:monotonic_time(micro_seconds), + erlang:monotonic_time(micro_seconds), 1.0}}. max_active_priority(#state{consumers = Consumers}) -> @@ -350,9 +350,9 @@ drain_mode(true) -> drain; drain_mode(false) -> manual. 
utilisation(#state{use = {active, Since, Avg}}) -> - use_avg(time_compat:monotonic_time(micro_seconds) - Since, 0, Avg); + use_avg(erlang:monotonic_time(micro_seconds) - Since, 0, Avg); utilisation(#state{use = {inactive, Since, Active, Avg}}) -> - use_avg(Active, time_compat:monotonic_time(micro_seconds) - Since, Avg). + use_avg(Active, erlang:monotonic_time(micro_seconds) - Since, Avg). %%---------------------------------------------------------------------------- @@ -459,10 +459,10 @@ update_use({inactive, _, _, _} = CUInfo, inactive) -> update_use({active, _, _} = CUInfo, active) -> CUInfo; update_use({active, Since, Avg}, inactive) -> - Now = time_compat:monotonic_time(micro_seconds), + Now = erlang:monotonic_time(micro_seconds), {inactive, Now, Now - Since, Avg}; update_use({inactive, Since, Active, Avg}, active) -> - Now = time_compat:monotonic_time(micro_seconds), + Now = erlang:monotonic_time(micro_seconds), {active, Now, use_avg(Active, Now - Since, Avg)}. use_avg(0, 0, Avg) -> diff --git a/src/rabbit_queue_location_random.erl b/src/rabbit_queue_location_random.erl index 2579cbb2b1a3..73d509bf3336 100644 --- a/src/rabbit_queue_location_random.erl +++ b/src/rabbit_queue_location_random.erl @@ -39,6 +39,6 @@ description() -> queue_master_location(#amqqueue{}) -> Cluster = rabbit_queue_master_location_misc:all_nodes(), - RandomPos = erlang:phash2(time_compat:monotonic_time(), length(Cluster)), + RandomPos = erlang:phash2(erlang:monotonic_time(), length(Cluster)), MasterNode = lists:nth(RandomPos + 1, Cluster), {ok, MasterNode}. 
diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 45dde112a56c..5b86cbd3d1d1 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -774,7 +774,7 @@ update_rates(State = #vqstate{ in_counter = InCount, ack_in = AckInRate, ack_out = AckOutRate, timestamp = TS }}) -> - Now = time_compat:monotonic_time(), + Now = erlang:monotonic_time(), Rates = #rates { in = update_rate(Now, TS, InCount, InRate), out = update_rate(Now, TS, OutCount, OutRate), @@ -789,7 +789,7 @@ update_rates(State = #vqstate{ in_counter = InCount, rates = Rates }. update_rate(Now, TS, Count, Rate) -> - Time = time_compat:convert_time_unit(Now - TS, native, micro_seconds) / + Time = erlang:convert_time_unit(Now - TS, native, micro_seconds) / ?MICROS_PER_SECOND, if Time == 0 -> Rate; @@ -1287,7 +1287,7 @@ init(IsDurable, IndexState, DeltaCount, DeltaBytes, Terms, count = DeltaCount1, end_seq_id = NextSeqId }) end, - Now = time_compat:monotonic_time(), + Now = erlang:monotonic_time(), IoBatchSize = rabbit_misc:get_env(rabbit, msg_store_io_batch_size, ?IO_BATCH_SIZE), From ddd0d2b6212001d1b2be9585240af63239a16e3c Mon Sep 17 00:00:00 2001 From: Liubov Efremova Date: Thu, 12 May 2016 18:55:21 +0300 Subject: [PATCH 084/174] Private attributes usage in rabbitmq script Required Pacemaker >= 1.1.13. (The command 'attrd_updater' have '-p' option only since this version). There are three types of rabbitmq attributes for pacemaker nodes: -'rabbit-master' -'rabbit-start-time' - timeouts: -'rabbit_list_channels_timeouts' -'rabbit_get_alarms_timeouts' -'rabbit_list_queues_timeouts' Attributes with names 'rabbit-master' and 'rabbit-start-time' should be public because we monitor this attributes in cycle for all nodes in our script. All timeouts attributes were changed to private to avoid unnecessary transitions. 
Also, --lifetime and --node options were removed for attrd_updater as 'lifetime' for this command is always 'reboot' and 'node' default value is local one. --- scripts/rabbitmq-server-ha.ocf | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/scripts/rabbitmq-server-ha.ocf b/scripts/rabbitmq-server-ha.ocf index 06eeb508374d..8a2075d9f0c6 100755 --- a/scripts/rabbitmq-server-ha.ocf +++ b/scripts/rabbitmq-server-ha.ocf @@ -1358,18 +1358,18 @@ is_master() { # separately. The second argument is used to distingush them. check_timeouts() { local op_rc=$1 - local crm_attr_name=$2 + local timeouts_attr_name=$2 local op_name=$3 if [ $op_rc -ne 124 -a $op_rc -ne 137 ]; then - ocf_run crm_attribute -N $THIS_PCMK_NODE -l reboot --name $crm_attr_name --update 0 + ocf_run attrd_updater -p --name $timeouts_attr_name --update 0 return 0 fi local count - count=`crm_attribute -N $THIS_PCMK_NODE -l reboot --name $crm_attr_name --query 2>/dev/null` + count=`attrd_updater --name $timeouts_attr_name --query 2>/dev/null` if [ $? -ne 0 ]; then - # the crm_attribute exited with error. In that case most probably it printed garbage + # the attrd_updater exited with error. In that case most probably it printed garbage # instead of the number we need. So defensively assume that it is zero. count=0 @@ -1378,9 +1378,9 @@ check_timeouts() { count=$((count+1)) # There is a slight chance that this piece of code will be executed twice simultaneously. - # As a result, $crm_attr_name's value will be one less than it should be. But we don't need + # As a result, $timeouts_attr_name's value will be one less than it should be. But we don't need # precise calculation here. - ocf_run crm_attribute -N $THIS_PCMK_NODE -l reboot --name $crm_attr_name --update $count + ocf_run attrd_updater -p --name $timeouts_attr_name --update $count if [ $count -lt $OCF_RESKEY_max_rabbitmqctl_timeouts ]; then ocf_log warn "${LH} 'rabbitmqctl $op_name' timed out $count of max. 
$OCF_RESKEY_max_rabbitmqctl_timeouts time(s) in a row. Doing nothing for now." @@ -1670,9 +1670,9 @@ action_start() { return $OCF_SUCCESS fi - ocf_run crm_attribute -N $THIS_PCMK_NODE -l reboot --name 'rabbit_list_channels_timeouts' --update '0' - ocf_run crm_attribute -N $THIS_PCMK_NODE -l reboot --name 'rabbit_get_alarms_timeouts' --update '0' - ocf_run crm_attribute -N $THIS_PCMK_NODE -l reboot --name 'rabbit_list_queues_timeouts' --update '0' + ocf_run attrd_updater -p --name 'rabbit_list_channels_timeouts' --update '0' + ocf_run attrd_updater -p --name 'rabbit_get_alarms_timeouts' --update '0' + ocf_run attrd_updater -p --name 'rabbit_list_queues_timeouts' --update '0' ocf_log info "${LH} Deleting start time attribute" ocf_run crm_attribute -N $THIS_PCMK_NODE -l reboot --name 'rabbit-start-time' --delete ocf_log info "${LH} Deleting master attribute" From e9e640284edf780ee669e547c28a6e9ad6c839ba Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Fri, 13 May 2016 10:44:35 +0800 Subject: [PATCH 085/174] (c) year --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d64ab34a16cd..ae6f9b2d0142 100644 --- a/README.md +++ b/README.md @@ -44,4 +44,4 @@ See [building RabbitMQ server from source](http://www.rabbitmq.com/build-server. ## Copyright -(c) Pivotal Software Inc., 2007-2015. +(c) Pivotal Software Inc., 2007-2016. 
From 87418da7b90c789310e6e258078fed94c7a7ce01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Fri, 13 May 2016 10:53:35 +0200 Subject: [PATCH 086/174] make source-dist: Exclude default $(PACKAGES_DIR) --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 833334dcd6c0..1d5ce3f2b499 100644 --- a/Makefile +++ b/Makefile @@ -221,6 +221,7 @@ RSYNC_FLAGS += -a $(RSYNC_V) \ --exclude 'plugins/' \ --exclude '$(notdir $(DIST_DIR))/' \ --exclude '/$(notdir $(PACKAGES_DIR))/' \ + --exclude '/PACKAGES/' \ --exclude '/cowboy/doc/' \ --exclude '/cowboy/examples/' \ --exclude '/rabbitmq_amqp1_0/test/swiftmq/build/'\ From a024f3ca6ed6f36b0136b978a664ba3877fffe4b Mon Sep 17 00:00:00 2001 From: kjnilsson Date: Fri, 13 May 2016 11:11:28 +0100 Subject: [PATCH 087/174] add vhost information to binding and policy events --- src/rabbit_binding.erl | 4 +++- src/rabbit_policy.erl | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 299e254c509a..8904c1dd74ae 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -100,7 +100,8 @@ -define(INFO_KEYS, [source_name, source_kind, destination_name, destination_kind, - routing_key, arguments]). + routing_key, arguments, + vhost]). recover(XNames, QNames) -> rabbit_misc:table_filter( @@ -272,6 +273,7 @@ infos(Items, B) -> [{Item, i(Item, B)} || Item <- Items]. 
i(source_name, #binding{source = SrcName}) -> SrcName#resource.name; i(source_kind, #binding{source = SrcName}) -> SrcName#resource.kind; +i(vhost, #binding{source = SrcName}) -> SrcName#resource.virtual_host; i(destination_name, #binding{destination = DstName}) -> DstName#resource.name; i(destination_kind, #binding{destination = DstName}) -> DstName#resource.kind; i(routing_key, #binding{key = RoutingKey}) -> RoutingKey; diff --git a/src/rabbit_policy.erl b/src/rabbit_policy.erl index d04551043e6a..eb8cf6332737 100644 --- a/src/rabbit_policy.erl +++ b/src/rabbit_policy.erl @@ -221,11 +221,11 @@ validate(_VHost, <<"policy">>, Name, Term, _User) -> Name, policy_validation(), Term). notify(VHost, <<"policy">>, Name, Term) -> - rabbit_event:notify(policy_set, [{name, Name} | Term]), + rabbit_event:notify(policy_set, [{name, Name}, {vhost, VHost} | Term]), update_policies(VHost). notify_clear(VHost, <<"policy">>, Name) -> - rabbit_event:notify(policy_cleared, [{name, Name}]), + rabbit_event:notify(policy_cleared, [{name, Name}, {vhost, VHost}]), update_policies(VHost). %%---------------------------------------------------------------------------- From 8e9ec88fb5ad318cf636770e95bda7c70400a5fb Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Fri, 13 May 2016 12:13:43 +0100 Subject: [PATCH 088/174] Limit priority to maximum during synchronisation --- src/rabbit_priority_queue.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_priority_queue.erl b/src/rabbit_priority_queue.erl index b58a8c535eb1..6141796f7b20 100644 --- a/src/rabbit_priority_queue.erl +++ b/src/rabbit_priority_queue.erl @@ -563,8 +563,9 @@ findfold3(Fun, Acc, NotFound, [{P, BQSN} | Rest], BQSAcc) -> findfold3(_Fun, Acc, NotFound, [], BQSAcc) -> {NotFound, Acc, lists:reverse(BQSAcc)}. 
-bq_fetch(P, []) -> exit({not_found, P}); +bq_fetch(P, []) -> exit({not_found, P}); bq_fetch(P, [{P, BQSN} | _]) -> BQSN; +bq_fetch(P, [{P1, BQSN} | _]) when P > P1 -> BQSN; bq_fetch(P, [{_, _BQSN} | T]) -> bq_fetch(P, T). bq_store(P, BQS, BQSs) -> From 77332e682250f4e6a315de81abadd9e3a682e416 Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Sat, 14 May 2016 09:40:15 +0100 Subject: [PATCH 089/174] Use AckRequired in drop_one as priority queues return tuples {Priority, AckTag} * Fetch and drop operations of priority queues' consumers with no_ack=true, were changed internally to no_ack=false in the requests to the mirror slaves. Those messages would never be acknowledge by the consumer, as it was not part of the consumers configuration. --- src/rabbit_mirror_queue_master.erl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index e447e9de820a..8246fcea7eaa 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -363,7 +363,7 @@ fetch(AckRequired, State = #state { backing_queue = BQ, State1 = State #state { backing_queue_state = BQS1 }, {Result, case Result of empty -> State1; - {_MsgId, _IsDelivered, AckTag} -> drop_one(AckTag, State1) + {_MsgId, _IsDelivered, _AckTag} -> drop_one(AckRequired, State1) end}. drop(AckRequired, State = #state { backing_queue = BQ, @@ -372,7 +372,7 @@ drop(AckRequired, State = #state { backing_queue = BQ, State1 = State #state { backing_queue_state = BQS1 }, {Result, case Result of empty -> State1; - {_MsgId, AckTag} -> drop_one(AckTag, State1) + {_MsgId, _AckTag} -> drop_one(AckRequired, State1) end}. 
ack(AckTags, State = #state { gm = GM, @@ -556,10 +556,10 @@ depth_fun() -> %% Helpers %% --------------------------------------------------------------------------- -drop_one(AckTag, State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS }) -> - ok = gm:broadcast(GM, {drop, BQ:len(BQS), 1, AckTag =/= undefined}), +drop_one(AckRequired, State = #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS }) -> + ok = gm:broadcast(GM, {drop, BQ:len(BQS), 1, AckRequired}), State. drop(PrevLen, AckRequired, State = #state { gm = GM, From 98ef0080bcda637fa188bcac1bcbd66d15e51ae1 Mon Sep 17 00:00:00 2001 From: Alexey Lebedeff Date: Mon, 16 May 2016 14:34:40 +0300 Subject: [PATCH 090/174] Add explicit epmd dependency in systemd example --- docs/rabbitmq-server.service.example | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/rabbitmq-server.service.example b/docs/rabbitmq-server.service.example index 5a8f1cd73e7c..0a0895036547 100644 --- a/docs/rabbitmq-server.service.example +++ b/docs/rabbitmq-server.service.example @@ -1,7 +1,8 @@ # systemd unit example [Unit] Description=RabbitMQ broker -After=syslog.target network.target +After=syslog.target network.target epmd@0.0.0.0.socket +Wants=network.target epmd@0.0.0.0.socket [Service] Type=notify From 4a2b17f6d9b3a7aca95caf8cb22e5bef9e61fe1c Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Tue, 17 May 2016 09:24:34 +0300 Subject: [PATCH 091/174] Only test against Erlang 18.3 --- .travis.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 54ef56a5e62d..48146e2978f1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,9 +10,6 @@ addons: packages: - xsltproc otp_release: - - "R16B03-1" - - "17.5" - - "18.0" - "18.3" env: matrix: From 1e349682ede277150610a92923e8af54ac764902 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Tue, 17 May 2016 10:16:07 +0300 Subject: [PATCH 092/174] Update rabbitmq-components.mk --- rabbitmq-components.mk | 
2 ++ 1 file changed, 2 insertions(+) diff --git a/rabbitmq-components.mk b/rabbitmq-components.mk index b200585b30f4..3e784e824710 100644 --- a/rabbitmq-components.mk +++ b/rabbitmq-components.mk @@ -44,6 +44,7 @@ dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(curren dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master +dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_lvc = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master @@ -99,6 +100,7 @@ RABBITMQ_COMPONENTS = amqp_client \ rabbitmq_federation \ rabbitmq_federation_management \ rabbitmq_java_client \ + rabbitmq_jms_topic_exchange \ rabbitmq_lvc \ rabbitmq_management \ rabbitmq_management_agent \ From 8fa920d91cb3c12ef6caf6dc818ef8d0661beaff Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Tue, 17 May 2016 11:17:51 +0300 Subject: [PATCH 093/174] Update rabbitmq-components.mk --- rabbitmq-components.mk | 2 ++ 1 file changed, 2 insertions(+) diff --git a/rabbitmq-components.mk b/rabbitmq-components.mk index b200585b30f4..3e784e824710 100644 --- a/rabbitmq-components.mk +++ b/rabbitmq-components.mk @@ -44,6 +44,7 @@ dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(curren dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_java_client = git_rmq 
rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master +dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_lvc = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master @@ -99,6 +100,7 @@ RABBITMQ_COMPONENTS = amqp_client \ rabbitmq_federation \ rabbitmq_federation_management \ rabbitmq_java_client \ + rabbitmq_jms_topic_exchange \ rabbitmq_lvc \ rabbitmq_management \ rabbitmq_management_agent \ From 16f72ad45d9aefb33a3f7996a5a9d6bad3045b28 Mon Sep 17 00:00:00 2001 From: Peter Lemenkov Date: Wed, 18 May 2016 14:23:29 +0300 Subject: [PATCH 094/174] Remove systemd dependency on syslog target Neither rabbitmq nor any dependent services in chain actually relies on syslog, so why keep it? This tested in Fedora since 2016-01. See this commit: http://pkgs.fedoraproject.org/cgit/rpms/rabbitmq-server.git/commit/?id=1477671 Nobody complained so far. 
Signed-off-by: Peter Lemenkov --- docs/rabbitmq-server.service.example | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/rabbitmq-server.service.example b/docs/rabbitmq-server.service.example index 0a0895036547..1aa6549b64b7 100644 --- a/docs/rabbitmq-server.service.example +++ b/docs/rabbitmq-server.service.example @@ -1,7 +1,7 @@ # systemd unit example [Unit] Description=RabbitMQ broker -After=syslog.target network.target epmd@0.0.0.0.socket +After=network.target epmd@0.0.0.0.socket Wants=network.target epmd@0.0.0.0.socket [Service] From 61e16f2dc264ab5a7a71de8acd620ba7278f8fae Mon Sep 17 00:00:00 2001 From: kjnilsson Date: Wed, 18 May 2016 12:55:27 +0100 Subject: [PATCH 095/174] Update rabbitmq-components.mk --- rabbitmq-components.mk | 2 ++ 1 file changed, 2 insertions(+) diff --git a/rabbitmq-components.mk b/rabbitmq-components.mk index 3e784e824710..920a67b1210a 100644 --- a/rabbitmq-components.mk +++ b/rabbitmq-components.mk @@ -63,6 +63,7 @@ dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master +dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master @@ -120,6 +121,7 @@ RABBITMQ_COMPONENTS = amqp_client \ rabbitmq_toke \ rabbitmq_top \ rabbitmq_tracing \ + rabbitmq_trust_store \ rabbitmq_web_dispatch \ rabbitmq_web_mqtt \ rabbitmq_web_mqtt_examples \ From d963bc8eff23802fb9f7ed9b0fa0b2f2040d9c1e Mon Sep 17 00:00:00 2001 From: Alexey Lebedeff Date: Wed, 9 Mar 2016 
18:09:04 +0300 Subject: [PATCH 096/174] Avoid RPC roundtrips in list commands Current implementation of various `list_XXX` commands require cross-node roundtrip for every processed item - because `rabbitmqctl` target node is responsible for gathering global list of all items of interest (channels etc.) and then processing them one by one. For example, listing 10000 channels evenly distributed across 3 nodes where network has 1ms delay takes more than 10 seconds on my machine. And with the proposed change listing will take almost the same time as it'll take to gather this info locally. E.g. in the case above listing now takes 0.7 second on the same machine with same 1ms delay. It works by invoking emitting_map on every node, where it should send info about only local items to aggregator, in an async fashion - as no reply from aggregator is needed. --- src/rabbit_control_main.erl | 154 ++++++++++++++++++++++-------------- 1 file changed, 96 insertions(+), 58 deletions(-) diff --git a/src/rabbit_control_main.erl b/src/rabbit_control_main.erl index 7f653c37801e..da5756db3d03 100644 --- a/src/rabbit_control_main.erl +++ b/src/rabbit_control_main.erl @@ -23,7 +23,7 @@ sync_queue/1, cancel_sync_queue/1, become/1, purge_queue/1]). --import(rabbit_misc, [rpc_call/4, rpc_call/5, rpc_call/7]). +-import(rabbit_misc, [rpc_call/4, rpc_call/5]). -define(EXTERNAL_CHECK_INTERVAL, 1000). 
@@ -579,56 +579,74 @@ action(purge_queue, Node, [Q], Opts, Inform, Timeout) -> action(list_users, Node, [], _Opts, Inform, Timeout) -> Inform("Listing users", []), - call(Node, {rabbit_auth_backend_internal, list_users, []}, - rabbit_auth_backend_internal:user_info_keys(), true, Timeout); + call_emitter(Node, {rabbit_auth_backend_internal, list_users, []}, + rabbit_auth_backend_internal:user_info_keys(), + [{timeout, Timeout}, to_bin_utf8]); action(list_permissions, Node, [], Opts, Inform, Timeout) -> VHost = proplists:get_value(?VHOST_OPT, Opts), Inform("Listing permissions in vhost \"~s\"", [VHost]), - call(Node, {rabbit_auth_backend_internal, list_vhost_permissions, [VHost]}, - rabbit_auth_backend_internal:vhost_perms_info_keys(), true, Timeout, - true); + call_emitter(Node, {rabbit_auth_backend_internal, list_vhost_permissions, [VHost]}, + rabbit_auth_backend_internal:vhost_perms_info_keys(), + [{timeout, Timeout}, to_bin_utf8, is_escaped]); action(list_parameters, Node, [], Opts, Inform, Timeout) -> VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), Inform("Listing runtime parameters", []), - call(Node, {rabbit_runtime_parameters, list_formatted, [VHostArg]}, - rabbit_runtime_parameters:info_keys(), Timeout); + call_emitter(Node, {rabbit_runtime_parameters, list_formatted, [VHostArg]}, + rabbit_runtime_parameters:info_keys(), + [{timeout, Timeout}]); action(list_policies, Node, [], Opts, Inform, Timeout) -> VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), Inform("Listing policies", []), - call(Node, {rabbit_policy, list_formatted, [VHostArg]}, - rabbit_policy:info_keys(), Timeout); + call_emitter(Node, {rabbit_policy, list_formatted, [VHostArg]}, + rabbit_policy:info_keys(), + [{timeout, Timeout}]); action(list_vhosts, Node, Args, _Opts, Inform, Timeout) -> Inform("Listing vhosts", []), ArgAtoms = default_if_empty(Args, [name]), - call(Node, {rabbit_vhost, info_all, []}, ArgAtoms, true, Timeout); + call_emitter(Node, 
{rabbit_vhost, info_all, []}, ArgAtoms, + [{timeout, Timeout}, to_bin_utf8]); action(list_user_permissions, _Node, _Args = [], _Opts, _Inform, _Timeout) -> {error_string, "list_user_permissions expects a username argument, but none provided."}; action(list_user_permissions, Node, Args = [_Username], _Opts, Inform, Timeout) -> Inform("Listing permissions for user ~p", Args), - call(Node, {rabbit_auth_backend_internal, list_user_permissions, Args}, - rabbit_auth_backend_internal:user_perms_info_keys(), true, Timeout, - true); + call_emitter(Node, {rabbit_auth_backend_internal, list_user_permissions, Args}, + rabbit_auth_backend_internal:user_perms_info_keys(), + [{timeout, Timeout}, to_bin_utf8, is_escaped]); action(list_queues, Node, Args, Opts, Inform, Timeout) -> - [Online, Offline] = rabbit_cli:filter_opts(Opts, [?ONLINE_OPT, ?OFFLINE_OPT]), Inform("Listing queues", []), + %% User options + [Online, Offline] = rabbit_cli:filter_opts(Opts, [?ONLINE_OPT, ?OFFLINE_OPT]), VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), ArgAtoms = default_if_empty(Args, [name, messages]), - call(Node, {rabbit_amqqueue, info_all, [VHostArg, ArgAtoms, Online, Offline]}, - ArgAtoms, Timeout); + + %% Data for emission + Nodes = nodes_in_cluster(Node, Timeout), + OnlineChunks = if Online -> length(Nodes); true -> 0 end, + OfflineChunks = if Offline -> 1; true -> 0 end, + ChunksOpt = {chunks, OnlineChunks + OfflineChunks}, + TimeoutOpt = {timeout, Timeout}, + EmissionRef = make_ref(), + EmissionRefOpt = {ref, EmissionRef}, + + _ = Online andalso start_emission(Node, {rabbit_amqqueue, emit_info_all, [Nodes, VHostArg, ArgAtoms]}, + [TimeoutOpt, EmissionRefOpt]), + _ = Offline andalso start_emission(Node, {rabbit_amqqueue, emit_info_down, [VHostArg, ArgAtoms]}, + [TimeoutOpt, EmissionRefOpt]), + display_emission_result(EmissionRef, ArgAtoms, [ChunksOpt, TimeoutOpt]); action(list_exchanges, Node, Args, Opts, Inform, Timeout) -> Inform("Listing exchanges", []), VHostArg = 
list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), ArgAtoms = default_if_empty(Args, [name, type]), - call(Node, {rabbit_exchange, info_all, [VHostArg, ArgAtoms]}, - ArgAtoms, Timeout); + call_emitter(Node, {rabbit_exchange, info_all, [VHostArg, ArgAtoms]}, + ArgAtoms, [{timeout, Timeout}]); action(list_bindings, Node, Args, Opts, Inform, Timeout) -> Inform("Listing bindings", []), @@ -636,27 +654,31 @@ action(list_bindings, Node, Args, Opts, Inform, Timeout) -> ArgAtoms = default_if_empty(Args, [source_name, source_kind, destination_name, destination_kind, routing_key, arguments]), - call(Node, {rabbit_binding, info_all, [VHostArg, ArgAtoms]}, - ArgAtoms, Timeout); + call_emitter(Node, {rabbit_binding, info_all, [VHostArg, ArgAtoms]}, + ArgAtoms, [{timeout, Timeout}]); action(list_connections, Node, Args, _Opts, Inform, Timeout) -> Inform("Listing connections", []), ArgAtoms = default_if_empty(Args, [user, peer_host, peer_port, state]), - call(Node, {rabbit_networking, connection_info_all, [ArgAtoms]}, - ArgAtoms, Timeout); + Nodes = nodes_in_cluster(Node, Timeout), + call_emitter(Node, {rabbit_networking, emit_connection_info_all, [Nodes, ArgAtoms]}, + ArgAtoms, [{timeout, Timeout}, {chunks, length(Nodes)}]); action(list_channels, Node, Args, _Opts, Inform, Timeout) -> Inform("Listing channels", []), ArgAtoms = default_if_empty(Args, [pid, user, consumer_count, messages_unacknowledged]), - call(Node, {rabbit_channel, info_all, [ArgAtoms]}, - ArgAtoms, Timeout); + Nodes = nodes_in_cluster(Node, Timeout), + call_emitter(Node, {rabbit_channel, emit_info_all, [Nodes, ArgAtoms]}, ArgAtoms, + [{timeout, Timeout}, {chunks, length(Nodes)}]); action(list_consumers, Node, _Args, Opts, Inform, Timeout) -> Inform("Listing consumers", []), VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), - call(Node, {rabbit_amqqueue, consumers_all, [VHostArg]}, - rabbit_amqqueue:consumer_info_keys(), Timeout). 
+ Nodes = nodes_in_cluster(Node, Timeout), + call_emitter(Node, {rabbit_amqqueue, emit_consumers_all, [Nodes, VHostArg]}, + rabbit_amqqueue:consumer_info_keys(), + [{timeout, Timeout}, {chunks, length(Nodes)}]). format_parse_error({_Line, Mod, Err}) -> lists:flatten(Mod:format_error(Err)). @@ -766,17 +788,18 @@ display_info_message_row(IsEscaped, Result, InfoItemKeys) -> {X, Value} -> Value end, IsEscaped) || X <- InfoItemKeys]). -display_info_message(IsEscaped) -> +display_info_message(IsEscaped, InfoItemKeys) -> fun ([], _) -> ok; - ([FirstResult|_] = List, InfoItemKeys) when is_list(FirstResult) -> + ([FirstResult|_] = List, _) when is_list(FirstResult) -> lists:foreach(fun(Result) -> display_info_message_row(IsEscaped, Result, InfoItemKeys) end, List), ok; - (Result, InfoItemKeys) -> - display_info_message_row(IsEscaped, Result, InfoItemKeys) + (Result, _) -> + display_info_message_row(IsEscaped, Result, InfoItemKeys), + ok end. display_info_list(Results, InfoItemKeys) when is_list(Results) -> @@ -833,7 +856,10 @@ display_call_result(Node, MFA) -> end. unsafe_rpc(Node, Mod, Fun, Args) -> - case rpc_call(Node, Mod, Fun, Args) of + unsafe_rpc(Node, Mod, Fun, Args, ?RPC_TIMEOUT). + +unsafe_rpc(Node, Mod, Fun, Args, Timeout) -> + case rpc_call(Node, Mod, Fun, Args, Timeout) of {badrpc, _} = Res -> throw(Res); Normal -> Normal end. @@ -852,33 +878,42 @@ ensure_app_running(Node) -> call(Node, {Mod, Fun, Args}) -> rpc_call(Node, Mod, Fun, lists:map(fun list_to_binary_utf8/1, Args)). -call(Node, {Mod, Fun, Args}, InfoKeys, Timeout) -> - call(Node, {Mod, Fun, Args}, InfoKeys, false, Timeout, false). +call_emitter(Node, {Mod, Fun, Args}, InfoKeys, Opts) -> + Ref = start_emission(Node, {Mod, Fun, Args}, Opts), + display_emission_result(Ref, InfoKeys, Opts). 
+ +start_emission(Node, {Mod, Fun, Args}, Opts) -> + ToBinUtf8 = proplists:get_value(to_bin_utf8, Opts, false), + Timeout = proplists:get_value(timeout, Opts, infinity), + Ref = proplists:get_value(ref, Opts, make_ref()), + rabbit_control_misc:spawn_emitter_caller( + Node, Mod, Fun, prepare_call_args(Args, ToBinUtf8), + Ref, self(), Timeout), + Ref. + +display_emission_result(Ref, InfoKeys, Opts) -> + IsEscaped = proplists:get_value(is_escaped, Opts, false), + Chunks = proplists:get_value(chunks, Opts, 1), + Timeout = proplists:get_value(timeout, Opts, infinity), + EmissionStatus = rabbit_control_misc:wait_for_info_messages( + self(), Ref, display_info_message(IsEscaped, InfoKeys), ok, Timeout, Chunks), + emission_to_action_result(EmissionStatus). + +%% Convert rabbit_control_misc:wait_for_info_messages/6 return value +%% into form expected by rabbit_cli:main/3. +emission_to_action_result({ok, ok}) -> + ok; +emission_to_action_result({error, Error}) -> + Error. -call(Node, {Mod, Fun, Args}, InfoKeys, ToBinUtf8, Timeout) -> - call(Node, {Mod, Fun, Args}, InfoKeys, ToBinUtf8, Timeout, false). +prepare_call_args(Args, ToBinUtf8) -> + case ToBinUtf8 of + true -> valid_utf8_args(Args); + false -> Args + end. -call(Node, {Mod, Fun, Args}, InfoKeys, ToBinUtf8, Timeout, IsEscaped) -> - Args0 = case ToBinUtf8 of - true -> lists:map(fun list_to_binary_utf8/1, Args); - false -> Args - end, - Ref = make_ref(), - Pid = self(), - spawn_link( - fun () -> - case rabbit_cli:rpc_call(Node, Mod, Fun, Args0, - Ref, Pid, Timeout) of - {error, _} = Error -> - Pid ! {error, Error}; - {bad_argument, _} = Error -> - Pid ! {error, Error}; - _ -> - ok - end - end), - rabbit_control_misc:wait_for_info_messages( - Pid, Ref, InfoKeys, display_info_message(IsEscaped), Timeout). +valid_utf8_args(Args) -> + lists:map(fun list_to_binary_utf8/1, Args). 
list_to_binary_utf8(L) -> B = list_to_binary(L), @@ -928,7 +963,10 @@ split_list([_]) -> exit(even_list_needed); split_list([A, B | T]) -> [{A, B} | split_list(T)]. nodes_in_cluster(Node) -> - unsafe_rpc(Node, rabbit_mnesia, cluster_nodes, [running]). + unsafe_rpc(Node, rabbit_mnesia, cluster_nodes, [running], ?RPC_TIMEOUT). + +nodes_in_cluster(Node, Timeout) -> + unsafe_rpc(Node, rabbit_mnesia, cluster_nodes, [running], Timeout). alarms_by_node(Name) -> Status = unsafe_rpc(Name, rabbit, status, []), From 02146c99661fa0ff066387ec1b4648361cdda28e Mon Sep 17 00:00:00 2001 From: Peter Lemenkov Date: Wed, 18 May 2016 14:23:29 +0300 Subject: [PATCH 097/174] Remove systemd dependency on syslog target Neither rabbitmq nor any dependent services in chain actually relies on syslog, so why keep it? This tested in Fedora since 2016-01. See this commit: http://pkgs.fedoraproject.org/cgit/rpms/rabbitmq-server.git/commit/?id=1477671 Nobody complained so far. Signed-off-by: Peter Lemenkov --- docs/rabbitmq-server.service.example | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/rabbitmq-server.service.example b/docs/rabbitmq-server.service.example index 0a0895036547..1aa6549b64b7 100644 --- a/docs/rabbitmq-server.service.example +++ b/docs/rabbitmq-server.service.example @@ -1,7 +1,7 @@ # systemd unit example [Unit] Description=RabbitMQ broker -After=syslog.target network.target epmd@0.0.0.0.socket +After=network.target epmd@0.0.0.0.socket Wants=network.target epmd@0.0.0.0.socket [Service] From 1dd9ae5f24b2b4f7e6b99d648d37cfc5216ceee6 Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Wed, 18 May 2016 16:18:23 +0100 Subject: [PATCH 098/174] Use max priority for higher priorities when returning backing queue state * Needed in synchronisation, otherwise messages for higher priorities are dropped in the slave --- src/rabbit_priority_queue.erl | 68 ++++++++++++++++------------------- 1 file changed, 31 insertions(+), 37 deletions(-) diff --git 
a/src/rabbit_priority_queue.erl b/src/rabbit_priority_queue.erl index 6141796f7b20..a3bfb5cdfa40 100644 --- a/src/rabbit_priority_queue.erl +++ b/src/rabbit_priority_queue.erl @@ -205,8 +205,8 @@ publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State = #passthrough{bq = BQ, bqs = BQS}) -> ?passthrough1(publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQS)). -batch_publish(Publishes, ChPid, Flow, State = #state{bq = BQ}) -> - PubDict = partition_publish_batch(Publishes), +batch_publish(Publishes, ChPid, Flow, State = #state{bq = BQ, bqss = [{MaxP, _} |_]}) -> + PubDict = partition_publish_batch(Publishes, MaxP), lists:foldl( fun ({Priority, Pubs}, St) -> pick1(fun (_P, BQSN) -> @@ -227,8 +227,8 @@ publish_delivered(Msg, MsgProps, ChPid, Flow, State = #passthrough{bq = BQ, bqs = BQS}) -> ?passthrough2(publish_delivered(Msg, MsgProps, ChPid, Flow, BQS)). -batch_publish_delivered(Publishes, ChPid, Flow, State = #state{bq = BQ}) -> - PubDict = partition_publish_delivered_batch(Publishes), +batch_publish_delivered(Publishes, ChPid, Flow, State = #state{bq = BQ, bqss = [{MaxP, _} |_]}) -> + PubDict = partition_publish_delivered_batch(Publishes, MaxP), {PrioritiesAndAcks, State1} = lists:foldl( fun ({Priority, Pubs}, {PriosAndAcks, St}) -> @@ -404,7 +404,6 @@ msg_rates(#state{bq = BQ, bqss = BQSs}) -> end, {0.0, 0.0}, BQSs); msg_rates(#passthrough{bq = BQ, bqs = BQS}) -> BQ:msg_rates(BQS). - info(backing_queue_status, #state{bq = BQ, bqss = BQSs}) -> fold0(fun (P, BQSN, Acc) -> combine_status(P, BQ:info(backing_queue_status, BQSN), Acc) @@ -433,8 +432,8 @@ set_queue_mode(Mode, State = #state{bq = BQ}) -> set_queue_mode(Mode, State = #passthrough{bq = BQ, bqs = BQS}) -> ?passthrough1(set_queue_mode(Mode, BQS)). 
-zip_msgs_and_acks(Msgs, AckTags, Accumulator, #state{}) -> - MsgsByPriority = partition_publish_delivered_batch(Msgs), +zip_msgs_and_acks(Msgs, AckTags, Accumulator, #state{bqss = [{MaxP, _} |_]}) -> + MsgsByPriority = partition_publish_delivered_batch(Msgs, MaxP), lists:foldl(fun (Acks, MAs) -> {P, _AckTag} = hd(Acks), Pubs = orddict:fetch(P, MsgsByPriority), @@ -484,13 +483,14 @@ foreach1(_Fun, [], BQSAcc) -> %% For a given thing, just go to its BQ pick1(Fun, Prioritisable, #state{bqss = BQSs} = State) -> - {P, BQSN} = priority(Prioritisable, BQSs), + {P, BQSN} = priority_bq(Prioritisable, BQSs), a(State#state{bqss = bq_store(P, Fun(P, BQSN), BQSs)}). %% Fold over results fold2(Fun, Acc, State = #state{bqss = BQSs}) -> {Res, BQSs1} = fold2(Fun, Acc, BQSs, []), {Res, a(State#state{bqss = BQSs1})}. + fold2(Fun, Acc, [{P, BQSN} | Rest], BQSAcc) -> {Acc1, BQSN1} = Fun(P, BQSN, Acc), fold2(Fun, Acc1, Rest, [{P, BQSN1} | BQSAcc]); @@ -532,7 +532,7 @@ fold_by_acktags2(Fun, AckTags, State) -> %% For a given thing, just go to its BQ pick2(Fun, Prioritisable, #state{bqss = BQSs} = State) -> - {P, BQSN} = priority(Prioritisable, BQSs), + {P, BQSN} = priority_bq(Prioritisable, BQSs), {Res, BQSN1} = Fun(P, BQSN), {Res, a(State#state{bqss = bq_store(P, BQSN1, BQSs)})}. @@ -564,8 +564,7 @@ findfold3(_Fun, Acc, NotFound, [], BQSAcc) -> {NotFound, Acc, lists:reverse(BQSAcc)}. bq_fetch(P, []) -> exit({not_found, P}); -bq_fetch(P, [{P, BQSN} | _]) -> BQSN; -bq_fetch(P, [{P1, BQSN} | _]) when P > P1 -> BQSN; +bq_fetch(P, [{P, BQSN} | _]) -> {P, BQSN}; bq_fetch(P, [{_, _BQSN} | T]) -> bq_fetch(P, T). bq_store(P, BQS, BQSs) -> @@ -583,41 +582,36 @@ a(State = #state{bqss = BQSs}) -> end. %%---------------------------------------------------------------------------- -partition_publish_batch(Publishes) -> +partition_publish_batch(Publishes, MaxP) -> partition_publishes( - Publishes, fun ({Msg, _, _}) -> Msg end). + Publishes, fun ({Msg, _, _}) -> Msg end, MaxP). 
-partition_publish_delivered_batch(Publishes) -> +partition_publish_delivered_batch(Publishes, MaxP) -> partition_publishes( - Publishes, fun ({Msg, _}) -> Msg end). + Publishes, fun ({Msg, _}) -> Msg end, MaxP). -partition_publishes(Publishes, ExtractMsg) -> +partition_publishes(Publishes, ExtractMsg, MaxP) -> lists:foldl(fun (Pub, Dict) -> Msg = ExtractMsg(Pub), - rabbit_misc:orddict_cons(priority2(Msg), Pub, Dict) + rabbit_misc:orddict_cons(priority(Msg, MaxP), Pub, Dict) end, orddict:new(), Publishes). -priority(P, BQSs) when is_integer(P) -> - {P, bq_fetch(P, BQSs)}; -priority(#basic_message{content = Content}, BQSs) -> - priority1(rabbit_binary_parser:ensure_content_decoded(Content), BQSs). - -priority1(_Content, [{P, BQSN}]) -> - {P, BQSN}; -priority1(Content, [{P, BQSN} | Rest]) -> - case priority2(Content) >= P of - true -> {P, BQSN}; - false -> priority1(Content, Rest) - end. - -priority2(#basic_message{content = Content}) -> - priority2(rabbit_binary_parser:ensure_content_decoded(Content)); -priority2(#content{properties = Props}) -> +priority_bq(Priority, [{MaxP, _} | _] = BQSs) -> + bq_fetch(priority(Priority, MaxP), BQSs). + +%% Messages with a priority which is higher than the queue's maximum are treated +%% as if they were published with the maximum priority. +priority(undefined, _MaxP) -> + 0; +priority(Priority, MaxP) when is_integer(Priority), Priority =< MaxP -> + Priority; +priority(Priority, MaxP) when is_integer(Priority), Priority > MaxP -> + MaxP; +priority(#basic_message{content = Content}, MaxP) -> + priority(rabbit_binary_parser:ensure_content_decoded(Content), MaxP); +priority(#content{properties = Props}, MaxP) -> #'P_basic'{priority = Priority0} = Props, - case Priority0 of - undefined -> 0; - _ when is_integer(Priority0) -> Priority0 - end. + priority(Priority0, MaxP). 
add_maybe_infinity(infinity, _) -> infinity; add_maybe_infinity(_, infinity) -> infinity; From 776dd2423f8e64ad7e387a7bb8e3b34e484f3c9d Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Thu, 19 May 2016 12:45:09 +0100 Subject: [PATCH 099/174] Commit package release notes for 3.6.2 --- packaging/RPMS/Fedora/rabbitmq-server.spec | 3 +++ packaging/debs/Debian/debian/changelog | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/packaging/RPMS/Fedora/rabbitmq-server.spec b/packaging/RPMS/Fedora/rabbitmq-server.spec index 5996b553aea2..5113de4755aa 100644 --- a/packaging/RPMS/Fedora/rabbitmq-server.spec +++ b/packaging/RPMS/Fedora/rabbitmq-server.spec @@ -129,6 +129,9 @@ done rm -rf %{buildroot} %changelog +* Thu May 19 2016 michael@rabbitmq.com 3.6.2-1 +- New Upstream Release + * Tue Mar 1 2016 michael@rabbitmq.com 3.6.1-1 - New Upstream Release diff --git a/packaging/debs/Debian/debian/changelog b/packaging/debs/Debian/debian/changelog index adf8ce5aa546..52459181a66a 100644 --- a/packaging/debs/Debian/debian/changelog +++ b/packaging/debs/Debian/debian/changelog @@ -1,3 +1,9 @@ +rabbitmq-server (3.6.2-1) unstable; urgency=low + + * New Upstream Release + + -- Michael Klishin Thu, 19 May 2016 09:20:06 +0100 + rabbitmq-server (3.6.1-1) unstable; urgency=low * New Upstream Release From 36365ab29fb13034e1928cecc963a50e8b915d68 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Tue, 12 Apr 2016 11:39:50 +0100 Subject: [PATCH 100/174] Support for type-specific exchange info --- src/rabbit_exchange.erl | 6 ++++-- src/rabbit_exchange_type_direct.erl | 4 ++++ src/rabbit_exchange_type_fanout.erl | 4 ++++ src/rabbit_exchange_type_headers.erl | 4 ++++ src/rabbit_exchange_type_invalid.erl | 4 ++++ src/rabbit_exchange_type_topic.erl | 4 ++++ 6 files changed, 24 insertions(+), 2 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 2e9afbfd2e7d..d85ff25834b8 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -348,9 +348,11 @@ 
i(policy, X) -> case rabbit_policy:name(X) of end; i(Item, _) -> throw({bad_argument, Item}). -info(X = #exchange{}) -> infos(?INFO_KEYS, X). +info(X = #exchange{type = Type}) -> + infos(?INFO_KEYS, X) ++ (type_to_module(Type)):info(X). -info(X = #exchange{}, Items) -> infos(Items, X). +info(X = #exchange{type = Type}, Items) -> + infos(Items, X) ++ (type_to_module(Type)):info(X, Items). info_all(VHostPath) -> map(VHostPath, fun (X) -> info(X) end). diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index 8a6886e37690..ed675b572a55 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -23,6 +23,7 @@ -export([validate/1, validate_binding/2, create/2, delete/3, policy_changed/2, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). +-export([info/1, info/2]). -rabbit_boot_step({?MODULE, [{description, "exchange type direct"}, @@ -31,6 +32,9 @@ {requires, rabbit_registry}, {enables, kernel_ready}]}). +info(_X) -> []. +info(_X, _) -> []. + description() -> [{description, <<"AMQP direct exchange, as per the AMQP specification">>}]. diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index d81e407f8f57..3aebc07b41ec 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -23,6 +23,7 @@ -export([validate/1, validate_binding/2, create/2, delete/3, policy_changed/2, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). +-export([info/1, info/2]). -rabbit_boot_step({?MODULE, [{description, "exchange type fanout"}, @@ -31,6 +32,9 @@ {requires, rabbit_registry}, {enables, kernel_ready}]}). +info(_X) -> []. +info(_X, _) -> []. + description() -> [{description, <<"AMQP fanout exchange, as per the AMQP specification">>}]. 
diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index fe344ba86e35..69b3db4d6517 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -24,6 +24,7 @@ -export([validate/1, validate_binding/2, create/2, delete/3, policy_changed/2, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). +-export([info/1, info/2]). -rabbit_boot_step({?MODULE, [{description, "exchange type headers"}, @@ -37,6 +38,9 @@ rabbit_framing:amqp_table()) -> boolean()). -endif. +info(_X) -> []. +info(_X, _) -> []. + description() -> [{description, <<"AMQP headers exchange, as per the AMQP specification">>}]. diff --git a/src/rabbit_exchange_type_invalid.erl b/src/rabbit_exchange_type_invalid.erl index c8ca7ecae41c..b2e2798e3a9c 100644 --- a/src/rabbit_exchange_type_invalid.erl +++ b/src/rabbit_exchange_type_invalid.erl @@ -23,6 +23,10 @@ -export([validate/1, validate_binding/2, create/2, delete/3, policy_changed/2, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). +-export([info/1, info/2]). + +info(_X) -> []. +info(_X, _) -> []. description() -> [{description, diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 0eccb66cfd16..60be07042618 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -24,6 +24,7 @@ -export([validate/1, validate_binding/2, create/2, delete/3, policy_changed/2, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). +-export([info/1, info/2]). -rabbit_boot_step({?MODULE, [{description, "exchange type topic"}, @@ -34,6 +35,9 @@ %%---------------------------------------------------------------------------- +info(_X) -> []. +info(_X, _) -> []. + description() -> [{description, <<"AMQP topic exchange, as per the AMQP specification">>}]. 
From 270997fd46286aa69c9da5fb30124f8e93bc7512 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Mon, 18 Apr 2016 14:11:59 +0100 Subject: [PATCH 101/174] Select type specific info items --- src/rabbit_exchange.erl | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index d85ff25834b8..676a6561f249 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -346,13 +346,17 @@ i(policy, X) -> case rabbit_policy:name(X) of none -> ''; Policy -> Policy end; -i(Item, _) -> throw({bad_argument, Item}). +i(Item, #exchange{type = Type} = X) -> + case (type_to_module(Type)):info(X, [Item]) of + [{Item, I}] -> I; + [] -> throw({bad_argument, Item}) + end. info(X = #exchange{type = Type}) -> infos(?INFO_KEYS, X) ++ (type_to_module(Type)):info(X). info(X = #exchange{type = Type}, Items) -> - infos(Items, X) ++ (type_to_module(Type)):info(X, Items). + infos(Items, X). info_all(VHostPath) -> map(VHostPath, fun (X) -> info(X) end). From 4dd4fd61f4e55bff38ec1725fb9c0df7437ba53f Mon Sep 17 00:00:00 2001 From: Alexey Lebedeff Date: Fri, 20 May 2016 15:43:35 +0300 Subject: [PATCH 102/174] Allow caching HiPE-compilation results That way HiPE compilation can be performed during package installation and will not waste time during every startup. rabbit_hipe is refactored to support both modes of compilation - during every server startup or separately with caching in the filesystem. 
--- docs/rabbitmqctl.1.xml | 27 +++++++++++ scripts/rabbitmq-env | 1 + scripts/rabbitmq-server | 6 ++- src/rabbit_control_main.erl | 13 +++++- src/rabbit_file.erl | 10 +++++ src/rabbit_hipe.erl | 89 +++++++++++++++++++++++++++---------- 6 files changed, 121 insertions(+), 25 deletions(-) mode change 100644 => 100755 scripts/rabbitmq-env diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 1ecf8d4d3ad0..ec864af6cf9f 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -290,6 +290,33 @@ + + + hipe_compile directory + + + Performs HiPE-compilation and caches resulting + .beam-files in the given directory. + + + Parent directories are created if necessary. Any + existing .beam files from the + directory are automatically deleted prior to + compilation. + + + To use these precompiled files, you should set + RABBITMQ_SERVER_CODE_PATH environment + variable to directory specified in + hipe_compile invocation. + + For example: + rabbitmqctl hipe_compile /tmp/rabbit-hipe/ebin + + HiPE-compiles modules and stores them to /tmp/rabbit-hipe/ebin directory. 
+ + + diff --git a/scripts/rabbitmq-env b/scripts/rabbitmq-env old mode 100644 new mode 100755 index 35239620cab2..def47ebd276d --- a/scripts/rabbitmq-env +++ b/scripts/rabbitmq-env @@ -177,6 +177,7 @@ DEFAULT_NODE_PORT=5672 [ "x" = "x$RABBITMQ_MNESIA_BASE" ] && RABBITMQ_MNESIA_BASE=${MNESIA_BASE} [ "x" = "x$RABBITMQ_SERVER_START_ARGS" ] && RABBITMQ_SERVER_START_ARGS=${SERVER_START_ARGS} [ "x" = "x$RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS" ] && RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=${SERVER_ADDITIONAL_ERL_ARGS} +[ "x" = "x$RABBITMQ_SERVER_CODE_PATH" ] && RABBITMQ_SERVER_CODE_PATH=${SERVER_CODE_PATH} [ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${MNESIA_DIR} [ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME} diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server index ab2975feb1fd..74337311cd11 100755 --- a/scripts/rabbitmq-server +++ b/scripts/rabbitmq-server @@ -133,11 +133,15 @@ ensure_thread_pool_size() { } start_rabbitmq_server() { + # "-pa ${RABBITMQ_SERVER_CODE_PATH}" should be the very first + # command-line argument. In case of using cached HiPE-compilation, + # this will allow for compiled versions of erlang built-in modules + # (e.g. lists) to be loaded. 
ensure_thread_pool_size check_start_params && RABBITMQ_CONFIG_FILE=$RABBITMQ_CONFIG_FILE \ exec ${ERL_DIR}erl \ - -pa ${RABBITMQ_EBIN_ROOT} \ + -pa ${RABBITMQ_SERVER_CODE_PATH} ${RABBITMQ_EBIN_ROOT} \ ${RABBITMQ_START_RABBIT} \ ${RABBITMQ_NAME_TYPE} ${RABBITMQ_NODENAME} \ -boot "${SASL_BOOT_FILE}" \ diff --git a/src/rabbit_control_main.erl b/src/rabbit_control_main.erl index f63694b65743..2df4fd96c0c7 100644 --- a/src/rabbit_control_main.erl +++ b/src/rabbit_control_main.erl @@ -37,6 +37,7 @@ reset, force_reset, rotate_logs, + hipe_compile, {join_cluster, [?RAM_DEF]}, change_cluster_node_type, @@ -113,7 +114,7 @@ [stop, stop_app, start_app, wait, reset, force_reset, rotate_logs, join_cluster, change_cluster_node_type, update_cluster_nodes, forget_cluster_node, rename_cluster_node, cluster_status, status, - environment, eval, force_boot, help, node_health_check]). + environment, eval, force_boot, help, node_health_check, hipe_compile]). -define(COMMANDS_WITH_TIMEOUT, [list_user_permissions, list_policies, list_queues, list_exchanges, @@ -383,6 +384,16 @@ action(rotate_logs, Node, Args = [Suffix], _Opts, Inform) -> Inform("Rotating logs to files with suffix \"~s\"", [Suffix]), call(Node, {rabbit, rotate_logs, Args}); +action(hipe_compile, _Node, [TargetDir], _Opts, _Inform) -> + ok = application:load(rabbit), + case rabbit_hipe:can_hipe_compile() of + true -> + {ok, _, _} = rabbit_hipe:compile_to_directory(TargetDir), + ok; + false -> + {error, "HiPE compilation is not supported"} + end; + action(close_connection, Node, [PidStr, Explanation], _Opts, Inform) -> Inform("Closing connection \"~s\"", [PidStr]), rpc_call(Node, rabbit_networking, close_connection, diff --git a/src/rabbit_file.erl b/src/rabbit_file.erl index 6c4f0e5ccde0..1088f2c2dd0c 100644 --- a/src/rabbit_file.erl +++ b/src/rabbit_file.erl @@ -23,6 +23,7 @@ -export([append_file/2, ensure_parent_dirs_exist/1]). -export([rename/2, delete/1, recursive_delete/1, recursive_copy/2]). -export([lock_file/1]). 
+-export([filename_as_a_directory/1]). -import(file_handle_cache, [with_handle/1, with_handle/2]). @@ -58,6 +59,7 @@ (file:filename(), file:filename()) -> rabbit_types:ok_or_error({file:filename(), file:filename(), any()})). -spec(lock_file/1 :: (file:filename()) -> rabbit_types:ok_or_error('eexist')). +-spec(filename_as_a_directory/1 :: (file:filename()) -> file:filename()). -endif. @@ -305,3 +307,11 @@ lock_file(Path) -> ok = prim_file:close(Lock) end) end. + +filename_as_a_directory(FileName) -> + case lists:last(FileName) of + "/" -> + FileName; + _ -> + FileName ++ "/" + end. diff --git a/src/rabbit_hipe.erl b/src/rabbit_hipe.erl index 05b5f3719d1d..494f0e06b7c5 100644 --- a/src/rabbit_hipe.erl +++ b/src/rabbit_hipe.erl @@ -5,15 +5,15 @@ %% practice 2 processes seems just as fast as any other number > 1, %% and keeps the progress bar realistic-ish. -define(HIPE_PROCESSES, 2). --export([maybe_hipe_compile/0, log_hipe_result/1]). -%% HiPE compilation happens before we have log handlers - so we have -%% to io:format/2, it's all we can do. +-export([maybe_hipe_compile/0, log_hipe_result/1]). +-export([compile_to_directory/1]). +-export([can_hipe_compile/0]). +%% Compile and load during server startup sequence maybe_hipe_compile() -> {ok, Want} = application:get_env(rabbit, hipe_compile), - Can = code:which(hipe) =/= non_existing, - case {Want, Can} of + case {Want, can_hipe_compile()} of {true, true} -> hipe_compile(); {true, false} -> false; {false, _} -> {ok, disabled} @@ -33,38 +33,49 @@ log_hipe_result(false) -> rabbit_log:warning( "Not HiPE compiling: HiPE not found in this Erlang installation.~n"). +hipe_compile() -> + hipe_compile(fun compile_and_load/1, false). + +compile_to_directory(Dir0) -> + Dir = rabbit_file:filename_as_a_directory(Dir0), + ok = prepare_ebin_directory(Dir), + hipe_compile(fun (Mod) -> compile_and_save(Mod, Dir) end, true). 
+ +need_compile(Mod, Force) -> + Exists = code:which(Mod) =/= non_existing, + %% We skip modules already natively compiled. This + %% happens when RabbitMQ is stopped (just the + %% application, not the entire node) and started + %% again. + NotYetCompiled = not already_hipe_compiled(Mod), + NotVersioned = not compiled_with_version_support(Mod), + Exists andalso (Force orelse (NotYetCompiled andalso NotVersioned)). + %% HiPE compilation happens before we have log handlers and can take a %% long time, so make an exception to our no-stdout policy and display %% progress via stdout. -hipe_compile() -> +hipe_compile(CompileFun, Force) -> {ok, HipeModulesAll} = application:get_env(rabbit, hipe_modules), - HipeModules = [HM || HM <- HipeModulesAll, - code:which(HM) =/= non_existing andalso - %% We skip modules already natively compiled. This - %% happens when RabbitMQ is stopped (just the - %% application, not the entire node) and started - %% again. - already_hipe_compiled(HM) - andalso (not compiled_with_version_support(HM))], + HipeModules = lists:filter(fun(Mod) -> need_compile(Mod, Force) end, HipeModulesAll), case HipeModules of [] -> {ok, already_compiled}; - _ -> do_hipe_compile(HipeModules) + _ -> do_hipe_compile(HipeModules, CompileFun) end. already_hipe_compiled(Mod) -> try %% OTP 18.x or later - Mod:module_info(native) =:= false + Mod:module_info(native) =:= true %% OTP prior to 18.x catch error:badarg -> - code:is_module_native(Mod) =:= false + code:is_module_native(Mod) =:= true end. compiled_with_version_support(Mod) -> proplists:get_value(erlang_version_support, Mod:module_info(attributes)) =/= undefined. -do_hipe_compile(HipeModules) -> +do_hipe_compile(HipeModules, CompileFun) -> Count = length(HipeModules), io:format("~nHiPE compiling: |~s|~n |", [string:copies("-", Count)]), @@ -79,11 +90,7 @@ do_hipe_compile(HipeModules) -> %% advanced API does not load automatically the code, except if the %% 'load' option is set. 
PidMRefs = [spawn_monitor(fun () -> [begin - {M, Beam, _} = - code:get_object_code(M), - {ok, _} = - hipe:compile(M, [], Beam, - [o3, load]), + CompileFun(M), io:format("#") end || M <- Ms] end) || @@ -101,3 +108,39 @@ split(L, N) -> split0(L, [[] || _ <- lists:seq(1, N)]). split0([], Ls) -> Ls; split0([I | Is], [L | Ls]) -> split0(Is, Ls ++ [[I | L]]). + +prepare_ebin_directory(Dir) -> + ok = rabbit_file:ensure_dir(Dir), + ok = delete_beam_files(Dir), + ok. + +delete_beam_files(Dir) -> + {ok, Files} = file:list_dir(Dir), + lists:foreach(fun(File) -> + case filename:extension(File) of + ".beam" -> + ok = file:delete(filename:join([Dir, File])); + _ -> + ok + end + end, + Files). + +compile_and_load(Mod) -> + {Mod, Beam, _} = code:get_object_code(Mod), + {ok, _} = hipe:compile(Mod, [], Beam, [o3, load]). + +compile_and_save(Module, Dir) -> + {Module, BeamCode, _} = code:get_object_code(Module), + BeamName = filename:join([Dir, atom_to_list(Module) ++ ".beam"]), + {ok, {Architecture, NativeCode}} = hipe:compile(Module, [], BeamCode, [o3]), + {ok, _, Chunks0} = beam_lib:all_chunks(BeamCode), + ChunkName = hipe_unified_loader:chunk_name(Architecture), + Chunks1 = lists:keydelete(ChunkName, 1, Chunks0), + Chunks = Chunks1 ++ [{ChunkName,NativeCode}], + {ok, BeamPlusNative} = beam_lib:build_module(Chunks), + ok = file:write_file(BeamName, BeamPlusNative), + BeamName. + +can_hipe_compile() -> + code:which(hipe) =/= non_existing. From 42fc7814bf7058637f45621bba4912c432b894df Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Sat, 21 May 2016 20:03:34 +0300 Subject: [PATCH 103/174] Naming --- src/rabbit_hipe.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_hipe.erl b/src/rabbit_hipe.erl index 494f0e06b7c5..d4597d4efcfa 100644 --- a/src/rabbit_hipe.erl +++ b/src/rabbit_hipe.erl @@ -41,7 +41,7 @@ compile_to_directory(Dir0) -> ok = prepare_ebin_directory(Dir), hipe_compile(fun (Mod) -> compile_and_save(Mod, Dir) end, true). 
-need_compile(Mod, Force) -> +needs_compilation(Mod, Force) -> Exists = code:which(Mod) =/= non_existing, %% We skip modules already natively compiled. This %% happens when RabbitMQ is stopped (just the @@ -56,7 +56,7 @@ need_compile(Mod, Force) -> %% progress via stdout. hipe_compile(CompileFun, Force) -> {ok, HipeModulesAll} = application:get_env(rabbit, hipe_modules), - HipeModules = lists:filter(fun(Mod) -> need_compile(Mod, Force) end, HipeModulesAll), + HipeModules = lists:filter(fun(Mod) -> needs_compilation(Mod, Force) end, HipeModulesAll), case HipeModules of [] -> {ok, already_compiled}; _ -> do_hipe_compile(HipeModules, CompileFun) From 228a78e703b3cf00d546a4a93320616ec52ec2e7 Mon Sep 17 00:00:00 2001 From: Alexey Lebedeff Date: Fri, 20 May 2016 18:12:31 +0300 Subject: [PATCH 104/174] Enable systemd triggers in deb-package Fixes #570 With this patch systemd service file will be used on systemd-enabled hosts, providing more seamless experience. I've tested it by building it on ubuntu 14.04, which is not managed by systemd. Still, it has necessary packages to build systemd-aware debs. 
And then I tried to install this package: - On ubuntu 14.04 - regular init script was used - On debian jessie with systemd and esl-erlang 18.3 - server was properly started using systemd service file (and epmd dependency was also automatically started by systemd) --- packaging/debs/Debian/debian/control | 3 ++- .../debs/Debian/debian/rabbitmq-server.service | 18 ++++++++++++++++++ packaging/debs/Debian/debian/rules | 2 +- 3 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 packaging/debs/Debian/debian/rabbitmq-server.service diff --git a/packaging/debs/Debian/debian/control b/packaging/debs/Debian/debian/control index 9cf494ab8774..29ea81049c49 100644 --- a/packaging/debs/Debian/debian/control +++ b/packaging/debs/Debian/debian/control @@ -7,11 +7,12 @@ Uploaders: Alvaro Videla , Jean-Sébastien Pédron , Giuseppe Privitera Build-Depends: debhelper (>= 9), + dh-systemd (>= 1.5), erlang-dev, python-simplejson, xmlto, xsltproc, - erlang-nox (>= 1:16.b.3), + erlang-nox (>= 1:16.b.3) | esl-erlang, zip, rsync Standards-Version: 3.9.4 diff --git a/packaging/debs/Debian/debian/rabbitmq-server.service b/packaging/debs/Debian/debian/rabbitmq-server.service new file mode 100644 index 000000000000..1aa6549b64b7 --- /dev/null +++ b/packaging/debs/Debian/debian/rabbitmq-server.service @@ -0,0 +1,18 @@ +# systemd unit example +[Unit] +Description=RabbitMQ broker +After=network.target epmd@0.0.0.0.socket +Wants=network.target epmd@0.0.0.0.socket + +[Service] +Type=notify +User=rabbitmq +Group=rabbitmq +NotifyAccess=all +TimeoutStartSec=3600 +WorkingDirectory=/var/lib/rabbitmq +ExecStart=/usr/lib/rabbitmq/bin/rabbitmq-server +ExecStop=/usr/lib/rabbitmq/bin/rabbitmqctl stop + +[Install] +WantedBy=multi-user.target diff --git a/packaging/debs/Debian/debian/rules b/packaging/debs/Debian/debian/rules index 053df181150f..770eeb0ea523 100755 --- a/packaging/debs/Debian/debian/rules +++ b/packaging/debs/Debian/debian/rules @@ -8,7 +8,7 @@ DEB_DESTDIR = 
debian/rabbitmq-server VERSION = $(shell dpkg-parsechangelog | awk '/^Version:/ {version=$$0; sub(/Version: /, "", version); sub(/-.*/, "", version); print version;}') %: - dh $@ --parallel + dh $@ --parallel --with systemd override_dh_auto_clean: $(MAKE) clean distclean-manpages From e05c64405020de144487be33f5e0c40cd0fba31e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Thu, 12 May 2016 15:34:31 +0200 Subject: [PATCH 105/174] Deb repository: Use SIGNING_KEY to select the repository signing key By default, honor the default key; usually it is specified in `gpg.conf`. References #718. [#118296861] --- packaging/debs/apt-repository/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/debs/apt-repository/Makefile b/packaging/debs/apt-repository/Makefile index bbddc15a4e6c..dbf8871a51a0 100644 --- a/packaging/debs/apt-repository/Makefile +++ b/packaging/debs/apt-repository/Makefile @@ -1,7 +1,7 @@ PACKAGES_DIR ?= ../../../PACKAGES REPO_DIR ?= debian -SIGNING_USER_EMAIL ?= info@rabbitmq.com +SIGNING_KEY ?= default ifeq "$(UNOFFICIAL_RELEASE)" "" HOME_ARG = HOME=$(GNUPG_PATH) @@ -18,7 +18,7 @@ debian_apt_repository: clean mkdir -p $(REPO_DIR)/conf cp -a distributions $(REPO_DIR)/conf ifeq "$(UNOFFICIAL_RELEASE)" "" - echo SignWith: $(SIGNING_USER_EMAIL) >> $(REPO_DIR)/conf/distributions + echo SignWith: $(SIGNING_KEY) >> $(REPO_DIR)/conf/distributions endif for FILE in $(PACKAGES_DIR)/*.changes ; do \ $(HOME_ARG) reprepro --ignore=wrongdistribution \ From 7e2b42209e60b8a04e4b60d7f5286ae0505ea1cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Thu, 12 May 2016 15:48:41 +0200 Subject: [PATCH 106/174] packaging: We only use the signing key ID This is the only way to select a specific subkey. The user ID is useless for that. References #718. 
[#118296861] --- packaging/Makefile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/packaging/Makefile b/packaging/Makefile index da3dcccb60be..31b85d9c1f09 100644 --- a/packaging/Makefile +++ b/packaging/Makefile @@ -62,9 +62,7 @@ endif VARS = SOURCE_DIST_FILE="$(abspath $(SOURCE_DIST_FILE))" \ PACKAGES_DIR="$(abspath $(PACKAGES_DIR))" \ - SIGNING_KEY="$(SIGNING_KEY)" \ - SIGNING_USER_ID="$(SIGNING_USER_ID)" \ - SIGNING_USER_EMAIL="$(SIGNING_USER_EMAIL)" + SIGNING_KEY="$(SIGNING_KEY)" packages: package-deb package-rpm package-windows package-generic-unix @: From bf5879e4ec987ca5a446cc03c2262b10bad637ba Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Thu, 26 May 2016 16:05:16 +0100 Subject: [PATCH 107/174] Initialise wait_timeout in slave promotion --- src/rabbit_mirror_queue_master.erl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 8246fcea7eaa..9674a4ef2c40 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -518,6 +518,7 @@ promote_backing_queue_state(QName, CPid, BQ, BQS, GM, AckTags, Seen, KS) -> Depth = BQ:depth(BQS1), true = Len == Depth, %% ASSERTION: everything must have been requeued ok = gm:broadcast(GM, {depth, Depth}), + WaitTimeout = rabbit_misc:get_env(rabbit, slave_wait_timeout, 15000), #state { name = QName, gm = GM, coordinator = CPid, @@ -525,7 +526,8 @@ promote_backing_queue_state(QName, CPid, BQ, BQS, GM, AckTags, Seen, KS) -> backing_queue_state = BQS1, seen_status = Seen, confirmed = [], - known_senders = sets:from_list(KS) }. + known_senders = sets:from_list(KS), + wait_timeout = WaitTimeout }. 
sender_death_fun() -> Self = self(), From becf2c7ecad6203d6b6705a0d32fdf3f7893bba3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Tue, 5 Apr 2016 18:42:17 +0200 Subject: [PATCH 108/174] make tests: Do not run tests from rabbitmq-test We are about to switch everything to common_test and rabbitmq-test will become unnecessary for the broker. --- Makefile | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/Makefile b/Makefile index 1d5ce3f2b499..867ecdf978e7 100644 --- a/Makefile +++ b/Makefile @@ -106,22 +106,8 @@ clean-extra-sources: # Tests. # -------------------------------------------------------------------- -TARGETS_IN_RABBITMQ_TEST = $(patsubst %,%-in-rabbitmq_test,\ - tests full unit lite conformance16 lazy-vq-tests) - -.PHONY: $(TARGETS_IN_RABBITMQ_TEST) - TEST_ERLC_OPTS += $(RMQ_ERLC_OPTS) -tests:: tests-in-rabbitmq_test - -$(TARGETS_IN_RABBITMQ_TEST): $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \ - test-build $(DEPS_DIR)/rabbitmq_test - $(MAKE) -C $(DEPS_DIR)/rabbitmq_test \ - IS_DEP=1 \ - RABBITMQ_BROKER_DIR=$(RABBITMQ_BROKER_DIR) \ - $(patsubst %-in-rabbitmq_test,%,$@) - # -------------------------------------------------------------------- # Documentation. # -------------------------------------------------------------------- From 8dbc931d46eb85dfe5674bd5c1a16a5df2cf3161 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Wed, 6 Apr 2016 12:59:14 +0200 Subject: [PATCH 109/174] rabbit_ct_broker_helpers: New helpers specific to the broker testsuite In particular, it allows to run tests on the broker node itself. References #725. 
[#116526487] --- test/rabbit_ct_broker_helpers.erl | 78 +++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 test/rabbit_ct_broker_helpers.erl diff --git a/test/rabbit_ct_broker_helpers.erl b/test/rabbit_ct_broker_helpers.erl new file mode 100644 index 000000000000..52e3b67dd14d --- /dev/null +++ b/test/rabbit_ct_broker_helpers.erl @@ -0,0 +1,78 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. +%% + +-module(rabbit_ct_broker_helpers). + +-include_lib("rabbit_common/include/rabbit.hrl"). + +-export([ + run_on_broker/4, + find_listener/0, + test_channel/0 + ]). + +run_on_broker(Node, Module, Function, Args) -> + %% We add some directories to the broker node search path. + Path1 = filename:dirname(code:which(Module)), + Path2 = filename:dirname(code:which(?MODULE)), + Paths = lists:usort([Path1, Path2]), + ExistingPaths = rpc:call(Node, code, get_path, []), + lists:foreach( + fun(P) -> + case lists:member(P, ExistingPaths) of + true -> ok; + false -> true = rpc:call(Node, code, add_pathz, [P]) + end + end, Paths), + %% If there is an exception, rpc:call/4 returns the exception as + %% a "normal" return value. If there is an exit signal, we raise + %% it again. In both cases, we have no idea of the module and line + %% number which triggered the issue. 
+ case rpc:call(Node, Module, Function, Args) of + {badrpc, {'EXIT', Reason}} -> exit(Reason); + {badrpc, Reason} -> exit(Reason); + Ret -> Ret + end. + +find_listener() -> + [#listener{host = H, port = P} | _] = + [L || L = #listener{node = N, protocol = amqp} + <- rabbit_networking:active_listeners(), + N =:= node()], + {H, P}. + +user(Username) -> + #user{username = Username, + tags = [administrator], + authz_backends = [{rabbit_auth_backend_internal, none}]}. + +test_channel() -> + Me = self(), + Writer = spawn(fun () -> test_writer(Me) end), + {ok, Limiter} = rabbit_limiter:start_link(no_id), + {ok, Ch} = rabbit_channel:start_link( + 1, Me, Writer, Me, "", rabbit_framing_amqp_0_9_1, + user(<<"guest">>), <<"/">>, [], Me, Limiter), + {Writer, Limiter, Ch}. + +test_writer(Pid) -> + receive + {'$gen_call', From, flush} -> gen_server:reply(From, ok), + test_writer(Pid); + {send_command, Method} -> Pid ! Method, + test_writer(Pid); + shutdown -> ok + end. From 4d2e3bbcbbfee4d72de3ddf4f610c5a2834ae0a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Tue, 5 Apr 2016 18:44:54 +0200 Subject: [PATCH 110/174] Switch testsuite to common_test, part #1 The migrated tests are the external ones executed from `rabbit_tests.erl`: o credit_flow_test o gm_tests o mirrored_supervisor_tests o on_disk_store_tunable_parameter_validation_test o password_hashing_tests o rabbit_ctl_timeout_tests o rabbit_resource_monitor_misc_test o supervisor2_tests o truncate o vm_memory_monitor_tests References #725. 
[#116526487] --- .gitignore | 2 + Makefile | 2 + src/truncate.erl | 75 +----- test/credit_flow_SUITE.erl | 77 ++++++ test/gm_SUITE.erl | 205 ++++++++++++++++ test/mirrored_supervisor_SUITE.erl | 335 ++++++++++++++++++++++++++ test/mirrored_supervisor_SUITE_gs.erl | 66 +++++ test/msg_store_SUITE.erl | 62 +++++ test/password_hashing_SUITE.erl | 121 ++++++++++ test/rabbit_ctl_timeout_SUITE.erl | 173 +++++++++++++ test/resource_monitor_SUITE.erl | 64 +++++ test/supervisor2_SUITE.erl | 90 +++++++ test/truncate_SUITE.erl | 99 ++++++++ test/vm_memory_monitor_SUITE.erl | 42 ++++ 14 files changed, 1342 insertions(+), 71 deletions(-) create mode 100644 test/credit_flow_SUITE.erl create mode 100644 test/gm_SUITE.erl create mode 100644 test/mirrored_supervisor_SUITE.erl create mode 100644 test/mirrored_supervisor_SUITE_gs.erl create mode 100644 test/msg_store_SUITE.erl create mode 100644 test/password_hashing_SUITE.erl create mode 100644 test/rabbit_ctl_timeout_SUITE.erl create mode 100644 test/resource_monitor_SUITE.erl create mode 100644 test/supervisor2_SUITE.erl create mode 100644 test/truncate_SUITE.erl create mode 100644 test/vm_memory_monitor_SUITE.erl diff --git a/.gitignore b/.gitignore index 3e0b54878986..720f17d9b640 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ .sw? .*.sw? *.beam +*.coverdata .erlang.mk/ cover/ debug/ @@ -11,6 +12,7 @@ ebin/ etc/ logs/ plugins/ +test/ct.cover.spec PACKAGES/ diff --git a/Makefile b/Makefile index 867ecdf978e7..266a4ed13ddc 100644 --- a/Makefile +++ b/Makefile @@ -25,6 +25,8 @@ DEP_PLUGINS = rabbit_common/mk/rabbitmq-run.mk \ rabbit_common/mk/rabbitmq-dist.mk \ rabbit_common/mk/rabbitmq-tools.mk +CT_OPTS += -ct_hooks cth_surefire + # FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be # reviewed and merged. 
diff --git a/src/truncate.erl b/src/truncate.erl index 1c9b08ed2746..a1586b0cb015 100644 --- a/src/truncate.erl +++ b/src/truncate.erl @@ -21,8 +21,10 @@ -record(params, {content, struct, content_dec, struct_dec}). -export([log_event/2, term/2]). -%% exported for testing --export([test/0]). + +-ifdef(TEST). +-export([term_size/3]). +-endif. log_event({Type, GL, {Pid, Format, Args}}, Params) when Type =:= error orelse @@ -123,72 +125,3 @@ tuple_term_size(_T, M, I, S, _W) when I > S -> M; tuple_term_size(T, M, I, S, W) -> tuple_term_size(T, lim(term_size(element(I, T), M, W), 2 * W), I + 1, S, W). - -%%---------------------------------------------------------------------------- - -test() -> - test_short_examples_exactly(), - test_term_limit(), - test_large_examples_for_size(), - ok. - -test_short_examples_exactly() -> - F = fun (Term, Exp) -> - Exp = term(Term, {1, {10, 10, 5, 5}}), - Term = term(Term, {100000, {10, 10, 5, 5}}) - end, - FSmall = fun (Term, Exp) -> - Exp = term(Term, {1, {2, 2, 2, 2}}), - Term = term(Term, {100000, {2, 2, 2, 2}}) - end, - F([], []), - F("h", "h"), - F("hello world", "hello w..."), - F([[h,e,l,l,o,' ',w,o,r,l,d]], [[h,e,l,l,o,'...']]), - F([a|b], [a|b]), - F(<<"hello">>, <<"hello">>), - F([<<"hello world">>], [<<"he...">>]), - F(<<1:1>>, <<1:1>>), - F(<<1:81>>, <<0:56, "...">>), - F({{{{a}}},{b},c,d,e,f,g,h,i,j,k}, {{{'...'}},{b},c,d,e,f,g,h,i,j,'...'}), - FSmall({a,30,40,40,40,40}, {a,30,'...'}), - FSmall([a,30,40,40,40,40], [a,30,'...']), - P = spawn(fun() -> receive die -> ok end end), - F([0, 0.0, <<1:1>>, F, P], [0, 0.0, <<1:1>>, F, P]), - P ! die, - R = make_ref(), - F([R], [R]), - ok. 
- -test_term_limit() -> - W = erlang:system_info(wordsize), - S = <<"abc">>, - 1 = term_size(S, 4, W), - limit_exceeded = term_size(S, 3, W), - case 100 - term_size([S, S], 100, W) of - 22 -> ok; %% 32 bit - 38 -> ok %% 64 bit - end, - case 100 - term_size([S, [S]], 100, W) of - 30 -> ok; %% ditto - 54 -> ok - end, - limit_exceeded = term_size([S, S], 6, W), - ok. - -test_large_examples_for_size() -> - %% Real world values - Shrink = fun(Term) -> term(Term, {1, {1000, 100, 50, 5}}) end, - TestSize = fun(Term) -> - true = 5000000 < size(term_to_binary(Term)), - true = 500000 > size(term_to_binary(Shrink(Term))) - end, - TestSize(lists:seq(1, 5000000)), - TestSize(recursive_list(1000, 10)), - TestSize(recursive_list(5000, 20)), - TestSize(gb_sets:from_list([I || I <- lists:seq(1, 1000000)])), - TestSize(gb_trees:from_orddict([{I, I} || I <- lists:seq(1, 1000000)])), - ok. - -recursive_list(S, 0) -> lists:seq(1, S); -recursive_list(S, N) -> [recursive_list(S div N, N-1) || _ <- lists:seq(1, S)]. diff --git a/test/credit_flow_SUITE.erl b/test/credit_flow_SUITE.erl new file mode 100644 index 000000000000..5747f8c8d8f7 --- /dev/null +++ b/test/credit_flow_SUITE.erl @@ -0,0 +1,77 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved. +%% + +-module(credit_flow_SUITE). + +-include_lib("common_test/include/ct.hrl"). + +-compile(export_all). 
+ +all() -> + [ + credit_flow_settings + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(?MODULE, Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +%% --------------------------------------------------------------------------- +%% Test cases +%% --------------------------------------------------------------------------- + +credit_flow_settings(Config) -> + passed = rabbit_ct_broker_helpers:run_test_on_broker( + ?config(rmq_nodename, Config), + ?MODULE, credit_flow_settings1, [Config]). + +credit_flow_settings1(_Config) -> + %% default values + passed = test_proc(200, 50), + + application:set_env(rabbit, credit_flow_default_credit, {100, 20}), + passed = test_proc(100, 20), + + application:unset_env(rabbit, credit_flow_default_credit), + + % back to defaults + passed = test_proc(200, 50), + passed. + +test_proc(InitialCredit, MoreCreditAfter) -> + Pid = spawn(fun dummy/0), + Pid ! {credit, self()}, + {InitialCredit, MoreCreditAfter} = + receive + {credit, Val} -> Val + end, + passed. + +dummy() -> + credit_flow:send(self()), + receive + {credit, From} -> + From ! {credit, get(credit_flow_default_credit)}; + _ -> + dummy() + end. diff --git a/test/gm_SUITE.erl b/test/gm_SUITE.erl new file mode 100644 index 000000000000..f5ccf75b7032 --- /dev/null +++ b/test/gm_SUITE.erl @@ -0,0 +1,205 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. 
See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. +%% + +-module(gm_SUITE). + +-behaviour(gm). + +-include_lib("common_test/include/ct.hrl"). + +-include("gm_specs.hrl"). + +-compile(export_all). + +-define(RECEIVE_OR_THROW(Body, Bool, Error), + receive Body -> + true = Bool, + passed + after 1000 -> + throw(Error) + end). + +all() -> + [ + join_leave, + broadcast, + confirmed_broadcast, + member_death, + receive_in_order + ]. + +init_per_suite(Config) -> + ok = application:set_env(mnesia, dir, ?config(priv_dir, Config)), + ok = application:start(mnesia), + {ok, FHC} = file_handle_cache:start_link(), + unlink(FHC), + {ok, WPS} = worker_pool_sup:start_link(), + unlink(WPS), + rabbit_ct_helpers:set_config(Config, [ + {file_handle_cache_pid, FHC}, + {worker_pool_sup_pid, WPS} + ]). + +end_per_suite(Config) -> + exit(?config(worker_pool_sup_pid, Config), shutdown), + exit(?config(file_handle_cache_pid, Config), shutdown), + ok = application:stop(mnesia), + Config. + +%% --------------------------------------------------------------------------- +%% Functional tests +%% --------------------------------------------------------------------------- + +join_leave(_Config) -> + passed = with_two_members(fun (_Pid, _Pid2) -> passed end). + +broadcast(_Config) -> + passed = do_broadcast(fun gm:broadcast/2). + +confirmed_broadcast(_Config) -> + passed = do_broadcast(fun gm:confirmed_broadcast/2). 
+ +member_death(_Config) -> + passed = with_two_members( + fun (Pid, Pid2) -> + {ok, Pid3} = gm:start_link( + ?MODULE, ?MODULE, self(), + fun rabbit_misc:execute_mnesia_transaction/1), + passed = receive_joined(Pid3, [Pid, Pid2, Pid3], + timeout_joining_gm_group_3), + passed = receive_birth(Pid, Pid3, timeout_waiting_for_birth_3_1), + passed = receive_birth(Pid2, Pid3, timeout_waiting_for_birth_3_2), + + unlink(Pid3), + exit(Pid3, kill), + + %% Have to do some broadcasts to ensure that all members + %% find out about the death. + passed = (broadcast_fun(fun gm:confirmed_broadcast/2))( + Pid, Pid2), + + passed = receive_death(Pid, Pid3, timeout_waiting_for_death_3_1), + passed = receive_death(Pid2, Pid3, timeout_waiting_for_death_3_2), + + passed + end). + +receive_in_order(_Config) -> + passed = with_two_members( + fun (Pid, Pid2) -> + Numbers = lists:seq(1,1000), + [begin ok = gm:broadcast(Pid, N), ok = gm:broadcast(Pid2, N) end + || N <- Numbers], + passed = receive_numbers( + Pid, Pid, {timeout_for_msgs, Pid, Pid}, Numbers), + passed = receive_numbers( + Pid, Pid2, {timeout_for_msgs, Pid, Pid2}, Numbers), + passed = receive_numbers( + Pid2, Pid, {timeout_for_msgs, Pid2, Pid}, Numbers), + passed = receive_numbers( + Pid2, Pid2, {timeout_for_msgs, Pid2, Pid2}, Numbers), + passed + end). + +do_broadcast(Fun) -> + with_two_members(broadcast_fun(Fun)). + +broadcast_fun(Fun) -> + fun (Pid, Pid2) -> + ok = Fun(Pid, magic_message), + passed = receive_or_throw({msg, Pid, Pid, magic_message}, + timeout_waiting_for_msg), + passed = receive_or_throw({msg, Pid2, Pid, magic_message}, + timeout_waiting_for_msg) + end. 
+ +with_two_members(Fun) -> + ok = gm:create_tables(), + + {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(), + fun rabbit_misc:execute_mnesia_transaction/1), + passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1), + + {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self(), + fun rabbit_misc:execute_mnesia_transaction/1), + passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2), + passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2), + + passed = Fun(Pid, Pid2), + + ok = gm:leave(Pid), + passed = receive_death(Pid2, Pid, timeout_waiting_for_death_1), + passed = + receive_termination(Pid, normal, timeout_waiting_for_termination_1), + + ok = gm:leave(Pid2), + passed = + receive_termination(Pid2, normal, timeout_waiting_for_termination_2), + + receive X -> throw({unexpected_message, X}) + after 0 -> passed + end. + +receive_or_throw(Pattern, Error) -> + ?RECEIVE_OR_THROW(Pattern, true, Error). + +receive_birth(From, Born, Error) -> + ?RECEIVE_OR_THROW({members_changed, From, Birth, Death}, + ([Born] == Birth) andalso ([] == Death), + Error). + +receive_death(From, Died, Error) -> + ?RECEIVE_OR_THROW({members_changed, From, Birth, Death}, + ([] == Birth) andalso ([Died] == Death), + Error). + +receive_joined(From, Members, Error) -> + ?RECEIVE_OR_THROW({joined, From, Members1}, + lists:usort(Members) == lists:usort(Members1), + Error). + +receive_termination(From, Reason, Error) -> + ?RECEIVE_OR_THROW({termination, From, Reason1}, + Reason == Reason1, + Error). + +receive_numbers(_Pid, _Sender, _Error, []) -> + passed; +receive_numbers(Pid, Sender, Error, [N | Numbers]) -> + ?RECEIVE_OR_THROW({msg, Pid, Sender, M}, + M == N, + Error), + receive_numbers(Pid, Sender, Error, Numbers). + +%% ------------------------------------------------------------------- +%% gm behavior callbacks. +%% ------------------------------------------------------------------- + +joined(Pid, Members) -> + Pid ! {joined, self(), Members}, + ok. 
+ +members_changed(Pid, Births, Deaths) -> + Pid ! {members_changed, self(), Births, Deaths}, + ok. + +handle_msg(Pid, From, Msg) -> + Pid ! {msg, self(), From, Msg}, + ok. + +handle_terminate(Pid, Reason) -> + Pid ! {termination, self(), Reason}, + ok. diff --git a/test/mirrored_supervisor_SUITE.erl b/test/mirrored_supervisor_SUITE.erl new file mode 100644 index 000000000000..5ed17c90bbb8 --- /dev/null +++ b/test/mirrored_supervisor_SUITE.erl @@ -0,0 +1,335 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved. +%% + +-module(mirrored_supervisor_SUITE). + +-behaviour(mirrored_supervisor). + +-include_lib("common_test/include/ct.hrl"). + +-compile(export_all). + +-define(MS, mirrored_supervisor). +-define(SERVER, mirrored_supervisor_SUITE_gs). + +all() -> + [ + migrate, + migrate_twice, + already_there, + delete_restart, + which_children, + large_group, + childspecs_at_init, + anonymous_supervisors, + no_migration_on_shutdown, + start_idempotence, + unsupported, + ignore, + startup_failure + ]. 
+ +init_per_suite(Config) -> + ok = application:set_env(mnesia, dir, ?config(priv_dir, Config)), + ok = application:start(mnesia), + lists:foreach( + fun ({Tab, TabDef}) -> + TabDef1 = proplists:delete(match, TabDef), + case mnesia:create_table(Tab, TabDef1) of + {atomic, ok} -> + ok; + {aborted, Reason} -> + throw({error, + {table_creation_failed, Tab, TabDef1, Reason}}) + end + end, mirrored_supervisor:table_definitions()), + Config. + +end_per_suite(Config) -> + ok = application:stop(mnesia), + Config. + +%% --------------------------------------------------------------------------- +%% Functional tests +%% --------------------------------------------------------------------------- + +%% Simplest test +migrate(_Config) -> + passed = with_sups( + fun([A, _]) -> + {ok, _} = ?MS:start_child(a, childspec(worker)), + Pid1 = pid_of(worker), + kill_registered(A, Pid1), + Pid2 = pid_of(worker), + false = (Pid1 =:= Pid2) + end, [a, b]). + +%% Is migration transitive? +migrate_twice(_Config) -> + passed = with_sups( + fun([A, B]) -> + {ok, _} = ?MS:start_child(a, childspec(worker)), + Pid1 = pid_of(worker), + kill_registered(A, Pid1), + {ok, C} = start_sup(c), + Pid2 = pid_of(worker), + kill_registered(B, Pid2), + Pid3 = pid_of(worker), + false = (Pid1 =:= Pid3), + kill(C) + end, [a, b]). + +%% Can't start the same child twice +already_there(_Config) -> + passed = with_sups( + fun([_, _]) -> + S = childspec(worker), + {ok, Pid} = ?MS:start_child(a, S), + {error, {already_started, Pid}} = ?MS:start_child(b, S) + end, [a, b]). 
+ +%% Deleting and restarting should work as per a normal supervisor +delete_restart(_Config) -> + passed = with_sups( + fun([_, _]) -> + S = childspec(worker), + {ok, Pid1} = ?MS:start_child(a, S), + {error, running} = ?MS:delete_child(a, worker), + ok = ?MS:terminate_child(a, worker), + ok = ?MS:delete_child(a, worker), + {ok, Pid2} = ?MS:start_child(b, S), + false = (Pid1 =:= Pid2), + ok = ?MS:terminate_child(b, worker), + {ok, Pid3} = ?MS:restart_child(b, worker), + Pid3 = pid_of(worker), + false = (Pid2 =:= Pid3), + %% Not the same supervisor as the worker is on + ok = ?MS:terminate_child(a, worker), + ok = ?MS:delete_child(a, worker), + {ok, Pid4} = ?MS:start_child(a, S), + false = (Pid3 =:= Pid4) + end, [a, b]). + +which_children(_Config) -> + passed = with_sups( + fun([A, B] = Both) -> + ?MS:start_child(A, childspec(worker)), + assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end), + ok = ?MS:terminate_child(a, worker), + assert_wc(Both, fun ([C]) -> undefined = wc_pid(C) end), + {ok, _} = ?MS:restart_child(a, worker), + assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end), + ?MS:start_child(B, childspec(worker2)), + assert_wc(Both, fun (C) -> 2 = length(C) end) + end, [a, b]). + +assert_wc(Sups, Fun) -> + [Fun(?MS:which_children(Sup)) || Sup <- Sups]. + +wc_pid(Child) -> + {worker, Pid, worker, [?MODULE]} = Child, + Pid. + +%% Not all the members of the group should actually do the failover +large_group(_Config) -> + passed = with_sups( + fun([A, _, _, _]) -> + {ok, _} = ?MS:start_child(a, childspec(worker)), + Pid1 = pid_of(worker), + kill_registered(A, Pid1), + Pid2 = pid_of(worker), + false = (Pid1 =:= Pid2) + end, [a, b, c, d]). + +%% Do childspecs work when returned from init? +childspecs_at_init(_Config) -> + S = childspec(worker), + passed = with_sups( + fun([A, _]) -> + Pid1 = pid_of(worker), + kill_registered(A, Pid1), + Pid2 = pid_of(worker), + false = (Pid1 =:= Pid2) + end, [{a, [S]}, {b, [S]}]). 
+ +anonymous_supervisors(_Config) -> + passed = with_sups( + fun([A, _B]) -> + {ok, _} = ?MS:start_child(A, childspec(worker)), + Pid1 = pid_of(worker), + kill_registered(A, Pid1), + Pid2 = pid_of(worker), + false = (Pid1 =:= Pid2) + end, [anon, anon]). + +%% When a mirrored_supervisor terminates, we should not migrate, but +%% the whole supervisor group should shut down. To test this we set up +%% a situation where the gen_server will only fail if it's running +%% under the supervisor called 'evil'. It should not migrate to +%% 'good' and survive, rather the whole group should go away. +no_migration_on_shutdown(_Config) -> + passed = with_sups( + fun([Evil, _]) -> + {ok, _} = ?MS:start_child(Evil, childspec(worker)), + try + call(worker, ping, 1000, 100), + exit(worker_should_not_have_migrated) + catch exit:{timeout_waiting_for_server, _, _} -> + ok + end + end, [evil, good]). + +start_idempotence(_Config) -> + passed = with_sups( + fun([_]) -> + CS = childspec(worker), + {ok, Pid} = ?MS:start_child(a, CS), + {error, {already_started, Pid}} = ?MS:start_child(a, CS), + ?MS:terminate_child(a, worker), + {error, already_present} = ?MS:start_child(a, CS) + end, [a]). + +unsupported(_Config) -> + try + ?MS:start_link({global, foo}, get_group(group), fun tx_fun/1, ?MODULE, + {one_for_one, []}), + exit(no_global) + catch error:badarg -> + ok + end, + try + {ok, _} = ?MS:start_link({local, foo}, get_group(group), + fun tx_fun/1, ?MODULE, {simple_one_for_one, []}), + exit(no_sofo) + catch error:badarg -> + ok + end. + +%% Just test we don't blow up +ignore(_Config) -> + ?MS:start_link({local, foo}, get_group(group), fun tx_fun/1, ?MODULE, + {fake_strategy_for_ignore, []}). + +startup_failure(_Config) -> + [test_startup_failure(F) || F <- [want_error, want_exit]]. 
+ +test_startup_failure(Fail) -> + process_flag(trap_exit, true), + ?MS:start_link(get_group(group), fun tx_fun/1, ?MODULE, + {one_for_one, [childspec(Fail)]}), + receive + {'EXIT', _, shutdown} -> + ok + after 1000 -> + exit({did_not_exit, Fail}) + end, + process_flag(trap_exit, false). + +%% --------------------------------------------------------------------------- + +with_sups(Fun, Sups) -> + inc_group(), + Pids = [begin {ok, Pid} = start_sup(Sup), Pid end || Sup <- Sups], + Fun(Pids), + [kill(Pid) || Pid <- Pids, is_process_alive(Pid)], + timer:sleep(500), + passed. + +start_sup(Spec) -> + start_sup(Spec, group). + +start_sup({Name, ChildSpecs}, Group) -> + {ok, Pid} = start_sup0(Name, get_group(Group), ChildSpecs), + %% We are not a supervisor, when we kill the supervisor we do not + %% want to die! + unlink(Pid), + {ok, Pid}; + +start_sup(Name, Group) -> + start_sup({Name, []}, Group). + +start_sup0(anon, Group, ChildSpecs) -> + ?MS:start_link(Group, fun tx_fun/1, ?MODULE, + {one_for_one, ChildSpecs}); + +start_sup0(Name, Group, ChildSpecs) -> + ?MS:start_link({local, Name}, Group, fun tx_fun/1, ?MODULE, + {one_for_one, ChildSpecs}). + +childspec(Id) -> + {Id,{?SERVER, start_link, [Id]}, transient, 16#ffffffff, worker, [?MODULE]}. + +pid_of(Id) -> + {received, Pid, ping} = call(Id, ping), + Pid. + +tx_fun(Fun) -> + case mnesia:sync_transaction(Fun) of + {atomic, Result} -> Result; + {aborted, Reason} -> throw({error, Reason}) + end. + +inc_group() -> + Count = case get(counter) of + undefined -> 0; + C -> C + end + 1, + put(counter, Count). + +get_group(Group) -> + {Group, get(counter)}. + +call(Id, Msg) -> call(Id, Msg, 10*1000, 100). + +call(Id, Msg, 0, _Decr) -> + exit({timeout_waiting_for_server, {Id, Msg}, erlang:get_stacktrace()}); + +call(Id, Msg, MaxDelay, Decr) -> + try + gen_server:call(Id, Msg, infinity) + catch exit:_ -> timer:sleep(Decr), + call(Id, Msg, MaxDelay - Decr, Decr) + end. + +kill(Pid) -> kill(Pid, []). 
+kill(Pid, Wait) when is_pid(Wait) -> kill(Pid, [Wait]); +kill(Pid, Waits) -> + erlang:monitor(process, Pid), + [erlang:monitor(process, P) || P <- Waits], + exit(Pid, bang), + kill_wait(Pid), + [kill_wait(P) || P <- Waits]. + +kill_registered(Pid, Child) -> + {registered_name, Name} = erlang:process_info(Child, registered_name), + kill(Pid, Child), + false = (Child =:= whereis(Name)), + ok. + +kill_wait(Pid) -> + receive + {'DOWN', _Ref, process, Pid, _Reason} -> + ok + end. + +%% --------------------------------------------------------------------------- + +init({fake_strategy_for_ignore, _ChildSpecs}) -> + ignore; + +init({Strategy, ChildSpecs}) -> + {ok, {{Strategy, 0, 1}, ChildSpecs}}. + diff --git a/test/mirrored_supervisor_SUITE_gs.erl b/test/mirrored_supervisor_SUITE_gs.erl new file mode 100644 index 000000000000..867754b4a226 --- /dev/null +++ b/test/mirrored_supervisor_SUITE_gs.erl @@ -0,0 +1,66 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved. +%% + +-module(mirrored_supervisor_SUITE_gs). + +%% Dumb gen_server we can supervise + +-export([start_link/1]). + +-export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3, + handle_cast/2]). + +-behaviour(gen_server). + +-define(MS, mirrored_supervisor). 
+ +start_link(want_error) -> + {error, foo}; + +start_link(want_exit) -> + exit(foo); + +start_link(Id) -> + gen_server:start_link({local, Id}, ?MODULE, [], []). + +%% --------------------------------------------------------------------------- + +init([]) -> + {ok, state}. + +handle_call(Msg, _From, State) -> + die_if_my_supervisor_is_evil(), + {reply, {received, self(), Msg}, State}. + +handle_cast(_Msg, State) -> + {noreply, State}. + +handle_info(_Info, State) -> + {noreply, State}. + +terminate(_Reason, _State) -> + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +die_if_my_supervisor_is_evil() -> + try lists:keysearch(self(), 2, ?MS:which_children(evil)) of + false -> ok; + _ -> exit(doooom) + catch + exit:{noproc, _} -> ok + end. diff --git a/test/msg_store_SUITE.erl b/test/msg_store_SUITE.erl new file mode 100644 index 000000000000..f63f6cb745b3 --- /dev/null +++ b/test/msg_store_SUITE.erl @@ -0,0 +1,62 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved. +%% + +-module(msg_store_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +-compile(export_all). + +-define(T(Fun, Args), (catch apply(rabbit, Fun, Args))). + +all() -> + [ + parameter_validation + ]. 
+ +parameter_validation(_Config) -> + %% make sure it works with default values + ok = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, + [?CREDIT_DISC_BOUND, ?IO_BATCH_SIZE]), + + %% IO_BATCH_SIZE must be greater than CREDIT_DISC_BOUND initial credit + ok = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, + [{2000, 500}, 3000]), + {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, + [{2000, 500}, 1500]), + + %% All values must be integers + {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, + [{2000, 500}, "1500"]), + {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, + [{"2000", 500}, abc]), + {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, + [{2000, "500"}, 2048]), + + %% CREDIT_DISC_BOUND must be a tuple + {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, + [[2000, 500], 1500]), + {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, + [2000, 1500]), + + %% config values can't be smaller than default values + {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, + [{1999, 500}, 2048]), + {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, + [{2000, 499}, 2048]), + {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, + [{2000, 500}, 2047]). diff --git a/test/password_hashing_SUITE.erl b/test/password_hashing_SUITE.erl new file mode 100644 index 000000000000..50f0f4e6d1d4 --- /dev/null +++ b/test/password_hashing_SUITE.erl @@ -0,0 +1,121 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. 
See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved. +%% + +-module(password_hashing_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +-compile(export_all). + +all() -> + [ + password_hashing, + change_password + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(?MODULE, Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +%% --------------------------------------------------------------------------- +%% Test cases +%% --------------------------------------------------------------------------- + +password_hashing(Config) -> + passed = rabbit_ct_broker_helpers:run_test_on_broker( + ?config(rmq_nodename, Config), + ?MODULE, password_hashing1, [Config]). 
+ +password_hashing1(_Config) -> + rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(), + application:set_env(rabbit, password_hashing_module, + rabbit_password_hashing_md5), + rabbit_password_hashing_md5 = rabbit_password:hashing_mod(), + application:set_env(rabbit, password_hashing_module, + rabbit_password_hashing_sha256), + rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(), + + rabbit_password_hashing_sha256 = + rabbit_password:hashing_mod(rabbit_password_hashing_sha256), + rabbit_password_hashing_md5 = + rabbit_password:hashing_mod(rabbit_password_hashing_md5), + rabbit_password_hashing_md5 = + rabbit_password:hashing_mod(undefined), + + rabbit_password_hashing_md5 = + rabbit_auth_backend_internal:hashing_module_for_user( + #internal_user{}), + rabbit_password_hashing_md5 = + rabbit_auth_backend_internal:hashing_module_for_user( + #internal_user{ + hashing_algorithm = undefined + }), + rabbit_password_hashing_md5 = + rabbit_auth_backend_internal:hashing_module_for_user( + #internal_user{ + hashing_algorithm = rabbit_password_hashing_md5 + }), + + rabbit_password_hashing_sha256 = + rabbit_auth_backend_internal:hashing_module_for_user( + #internal_user{ + hashing_algorithm = rabbit_password_hashing_sha256 + }), + + passed. + +change_password(Config) -> + passed = rabbit_ct_broker_helpers:run_test_on_broker( + ?config(rmq_nodename, Config), + ?MODULE, change_password1, [Config]). 
+ +change_password1(_Config) -> + UserName = <<"test_user">>, + Password = <<"test_password">>, + case rabbit_auth_backend_internal:lookup_user(UserName) of + {ok, _} -> rabbit_auth_backend_internal:delete_user(UserName); + _ -> ok + end, + ok = application:set_env(rabbit, password_hashing_module, + rabbit_password_hashing_md5), + ok = rabbit_auth_backend_internal:add_user(UserName, Password), + {ok, #auth_user{username = UserName}} = + rabbit_auth_backend_internal:user_login_authentication( + UserName, [{password, Password}]), + ok = application:set_env(rabbit, password_hashing_module, + rabbit_password_hashing_sha256), + {ok, #auth_user{username = UserName}} = + rabbit_auth_backend_internal:user_login_authentication( + UserName, [{password, Password}]), + + NewPassword = <<"test_password1">>, + ok = rabbit_auth_backend_internal:change_password(UserName, NewPassword), + {ok, #auth_user{username = UserName}} = + rabbit_auth_backend_internal:user_login_authentication( + UserName, [{password, NewPassword}]), + + {refused, _, [UserName]} = + rabbit_auth_backend_internal:user_login_authentication( + UserName, [{password, Password}]), + passed. diff --git a/test/rabbit_ctl_timeout_SUITE.erl b/test/rabbit_ctl_timeout_SUITE.erl new file mode 100644 index 000000000000..92b2867a5426 --- /dev/null +++ b/test/rabbit_ctl_timeout_SUITE.erl @@ -0,0 +1,173 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. 
+%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. +%% + +-module(rabbit_ctl_timeout_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +-compile(export_all). + +-define(TIMEOUT_LIST_OPS_PASS, 1000). + +all() -> + [ + list_operations_timeout_pass + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(?MODULE, Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +%% ------------------------------------------------------------------- +%% Test cases. +%% ------------------------------------------------------------------- + +list_operations_timeout_pass(Config) -> + passed = rabbit_ct_broker_helpers:run_test_on_broker( + ?config(rmq_nodename, Config), + ?MODULE, list_operations_timeout_pass1, [Config]). 
+ +list_operations_timeout_pass1(_Config) -> + %% create a few things so there is some useful information to list + {_Writer1, Limiter1, Ch1} = rabbit_ct_broker_helpers:test_channel(), + {_Writer2, Limiter2, Ch2} = rabbit_ct_broker_helpers:test_channel(), + + [Q, Q2] = [Queue || Name <- [<<"foo">>, <<"bar">>], + {new, Queue = #amqqueue{}} <- + [rabbit_amqqueue:declare( + rabbit_misc:r(<<"/">>, queue, Name), + false, false, [], none)]], + + ok = rabbit_amqqueue:basic_consume( + Q, true, Ch1, Limiter1, false, 0, <<"ctag1">>, true, [], + undefined), + ok = rabbit_amqqueue:basic_consume( + Q2, true, Ch2, Limiter2, false, 0, <<"ctag2">>, true, [], + undefined), + + %% list users + ok = rabbit_ct_helpers:control_action(add_user, ["foo", "bar"]), + {error, {user_already_exists, _}} = + rabbit_ct_helpers:control_action(add_user, ["foo", "bar"]), + ok = rabbit_ct_helpers:control_action_t(list_users, [], + ?TIMEOUT_LIST_OPS_PASS), + + %% list parameters + ok = rabbit_runtime_parameters_test:register(), + ok = rabbit_ct_helpers:control_action(set_parameter, + ["test", "good", "123"]), + ok = rabbit_ct_helpers:control_action_t(list_parameters, [], + ?TIMEOUT_LIST_OPS_PASS), + ok = rabbit_ct_helpers:control_action(clear_parameter, + ["test", "good"]), + rabbit_runtime_parameters_test:unregister(), + + %% list vhosts + ok = rabbit_ct_helpers:control_action(add_vhost, ["/testhost"]), + {error, {vhost_already_exists, _}} = + rabbit_ct_helpers:control_action(add_vhost, ["/testhost"]), + ok = rabbit_ct_helpers:control_action_t(list_vhosts, [], + ?TIMEOUT_LIST_OPS_PASS), + + %% list permissions + ok = rabbit_ct_helpers:control_action(set_permissions, + ["foo", ".*", ".*", ".*"], + [{"-p", "/testhost"}]), + ok = rabbit_ct_helpers:control_action_t(list_permissions, [], + [{"-p", "/testhost"}], + ?TIMEOUT_LIST_OPS_PASS), + + %% list user permissions + ok = rabbit_ct_helpers:control_action_t(list_user_permissions, ["foo"], + ?TIMEOUT_LIST_OPS_PASS), + + %% list policies + ok = 
rabbit_ct_helpers:control_action_opts(["set_policy", "name", ".*", + "{\"ha-mode\":\"all\"}"]), + ok = rabbit_ct_helpers:control_action_t(list_policies, [], + ?TIMEOUT_LIST_OPS_PASS), + ok = rabbit_ct_helpers:control_action(clear_policy, ["name"]), + + %% list queues + ok = rabbit_ct_helpers:info_action_t(list_queues, + rabbit_amqqueue:info_keys(), false, + ?TIMEOUT_LIST_OPS_PASS), + + %% list exchanges + ok = rabbit_ct_helpers:info_action_t(list_exchanges, + rabbit_exchange:info_keys(), true, + ?TIMEOUT_LIST_OPS_PASS), + + %% list bindings + ok = rabbit_ct_helpers:info_action_t(list_bindings, + rabbit_binding:info_keys(), true, + ?TIMEOUT_LIST_OPS_PASS), + + %% list connections + {H, P} = rabbit_ct_broker_helpers:find_listener(), + {ok, C1} = gen_tcp:connect(H, P, [binary, {active, false}]), + gen_tcp:send(C1, <<"AMQP", 0, 0, 9, 1>>), + {ok, <<1,0,0>>} = gen_tcp:recv(C1, 3, 100), + + {ok, C2} = gen_tcp:connect(H, P, [binary, {active, false}]), + gen_tcp:send(C2, <<"AMQP", 0, 0, 9, 1>>), + {ok, <<1,0,0>>} = gen_tcp:recv(C2, 3, 100), + + ok = rabbit_ct_helpers:info_action_t( + list_connections, rabbit_networking:connection_info_keys(), false, + ?TIMEOUT_LIST_OPS_PASS), + + %% list consumers + ok = rabbit_ct_helpers:info_action_t( + list_consumers, rabbit_amqqueue:consumer_info_keys(), false, + ?TIMEOUT_LIST_OPS_PASS), + + %% list channels + ok = rabbit_ct_helpers:info_action_t( + list_channels, rabbit_channel:info_keys(), false, + ?TIMEOUT_LIST_OPS_PASS), + + %% do some cleaning up + ok = rabbit_ct_helpers:control_action(delete_user, ["foo"]), + {error, {no_such_user, _}} = + rabbit_ct_helpers:control_action(delete_user, ["foo"]), + + ok = rabbit_ct_helpers:control_action(delete_vhost, ["/testhost"]), + {error, {no_such_vhost, _}} = + rabbit_ct_helpers:control_action(delete_vhost, ["/testhost"]), + + %% close_connection + Conns = rabbit_networking:connections(), + [ok = rabbit_ct_helpers:control_action( + close_connection, [rabbit_misc:pid_to_string(ConnPid), "go 
away"]) + || ConnPid <- Conns], + + %% cleanup queues + [{ok, _} = rabbit_amqqueue:delete(QR, false, false) || QR <- [Q, Q2]], + + [begin + unlink(Chan), + ok = rabbit_channel:shutdown(Chan) + end || Chan <- [Ch1, Ch2]], + passed. diff --git a/test/resource_monitor_SUITE.erl b/test/resource_monitor_SUITE.erl new file mode 100644 index 000000000000..378b75cc1a58 --- /dev/null +++ b/test/resource_monitor_SUITE.erl @@ -0,0 +1,64 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. +%% + +-module(resource_monitor_SUITE). + +-include_lib("common_test/include/ct.hrl"). + +-compile(export_all). + +all() -> + [ + parse_information_unit + ]. 
+ +%% --------------------------------------------------------------------------- +%% Tests +%% --------------------------------------------------------------------------- + +parse_information_unit(_Config) -> + lists:foreach(fun ({S, V}) -> + V = rabbit_resource_monitor_misc:parse_information_unit(S) + end, + [ + {"1000", {ok, 1000}}, + + {"10kB", {ok, 10000}}, + {"10MB", {ok, 10000000}}, + {"10GB", {ok, 10000000000}}, + + {"10kiB", {ok, 10240}}, + {"10MiB", {ok, 10485760}}, + {"10GiB", {ok, 10737418240}}, + + {"10k", {ok, 10240}}, + {"10M", {ok, 10485760}}, + {"10G", {ok, 10737418240}}, + + {"10KB", {ok, 10000}}, + {"10K", {ok, 10240}}, + {"10m", {ok, 10485760}}, + {"10Mb", {ok, 10000000}}, + + {"0MB", {ok, 0}}, + + {"10 k", {error, parse_error}}, + {"MB", {error, parse_error}}, + {"", {error, parse_error}}, + {"0.5GB", {error, parse_error}}, + {"10TB", {error, parse_error}} + ]), + passed. diff --git a/test/supervisor2_SUITE.erl b/test/supervisor2_SUITE.erl new file mode 100644 index 000000000000..e2b95146c0ee --- /dev/null +++ b/test/supervisor2_SUITE.erl @@ -0,0 +1,90 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved. +%% + +-module(supervisor2_SUITE). + +-behaviour(supervisor2). + +-include_lib("common_test/include/ct.hrl"). + +-compile(export_all). + +all() -> + [ + check_shutdown_stop, + check_shutdown_ignored + ]. 
+ +check_shutdown_stop(_Config) -> + ok = check_shutdown(stop, 200, 200, 2000). + +check_shutdown_ignored(_Config) -> + ok = check_shutdown(ignored, 1, 2, 2000). + +check_shutdown(SigStop, Iterations, ChildCount, SupTimeout) -> + {ok, Sup} = supervisor2:start_link(?MODULE, [SupTimeout]), + Res = lists:foldl( + fun (I, ok) -> + TestSupPid = erlang:whereis(?MODULE), + ChildPids = + [begin + {ok, ChildPid} = + supervisor2:start_child(TestSupPid, []), + ChildPid + end || _ <- lists:seq(1, ChildCount)], + MRef = erlang:monitor(process, TestSupPid), + [P ! SigStop || P <- ChildPids], + ok = supervisor2:terminate_child(Sup, test_sup), + {ok, _} = supervisor2:restart_child(Sup, test_sup), + receive + {'DOWN', MRef, process, TestSupPid, shutdown} -> + ok; + {'DOWN', MRef, process, TestSupPid, Reason} -> + {error, {I, Reason}} + end; + (_, R) -> + R + end, ok, lists:seq(1, Iterations)), + unlink(Sup), + MSupRef = erlang:monitor(process, Sup), + exit(Sup, shutdown), + receive + {'DOWN', MSupRef, process, Sup, _Reason} -> + ok + end, + Res. + +%% ------------------------------------------------------------------- +%% supervisor2 behavior callbacks. +%% ------------------------------------------------------------------- + +start_link() -> + Pid = spawn_link(fun () -> + process_flag(trap_exit, true), + receive stop -> ok end + end), + {ok, Pid}. + +init([Timeout]) -> + {ok, {{one_for_one, 0, 1}, + [{test_sup, {supervisor2, start_link, + [{local, ?MODULE}, ?MODULE, []]}, + transient, Timeout, supervisor, [?MODULE]}]}}; +init([]) -> + {ok, {{simple_one_for_one, 0, 1}, + [{test_worker, {?MODULE, start_link, []}, + temporary, 1000, worker, [?MODULE]}]}}. 
+ diff --git a/test/truncate_SUITE.erl b/test/truncate_SUITE.erl new file mode 100644 index 000000000000..6c78be726de9 --- /dev/null +++ b/test/truncate_SUITE.erl @@ -0,0 +1,99 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(truncate_SUITE). + +-include_lib("common_test/include/ct.hrl"). + +-compile(export_all). + +all() -> + [ + {group, all_tests} + ]. + +groups() -> + [ + {all_tests, [parallel], [ + short_examples_exactly, + term_limit, + large_examples_for_size + ]} + ]. + +init_per_group(_, Config) -> Config. +end_per_group(_, Config) -> Config. 
+ +short_examples_exactly(_Config) -> + F = fun (Term, Exp) -> + Exp = truncate:term(Term, {1, {10, 10, 5, 5}}), + Term = truncate:term(Term, {100000, {10, 10, 5, 5}}) + end, + FSmall = fun (Term, Exp) -> + Exp = truncate:term(Term, {1, {2, 2, 2, 2}}), + Term = truncate:term(Term, {100000, {2, 2, 2, 2}}) + end, + F([], []), + F("h", "h"), + F("hello world", "hello w..."), + F([[h,e,l,l,o,' ',w,o,r,l,d]], [[h,e,l,l,o,'...']]), + F([a|b], [a|b]), + F(<<"hello">>, <<"hello">>), + F([<<"hello world">>], [<<"he...">>]), + F(<<1:1>>, <<1:1>>), + F(<<1:81>>, <<0:56, "...">>), + F({{{{a}}},{b},c,d,e,f,g,h,i,j,k}, {{{'...'}},{b},c,d,e,f,g,h,i,j,'...'}), + FSmall({a,30,40,40,40,40}, {a,30,'...'}), + FSmall([a,30,40,40,40,40], [a,30,'...']), + P = spawn(fun() -> receive die -> ok end end), + F([0, 0.0, <<1:1>>, F, P], [0, 0.0, <<1:1>>, F, P]), + P ! die, + R = make_ref(), + F([R], [R]), + ok. + +term_limit(_Config) -> + W = erlang:system_info(wordsize), + S = <<"abc">>, + 1 = truncate:term_size(S, 4, W), + limit_exceeded = truncate:term_size(S, 3, W), + case 100 - truncate:term_size([S, S], 100, W) of + 22 -> ok; %% 32 bit + 38 -> ok %% 64 bit + end, + case 100 - truncate:term_size([S, [S]], 100, W) of + 30 -> ok; %% ditto + 54 -> ok + end, + limit_exceeded = truncate:term_size([S, S], 6, W), + ok. + +large_examples_for_size(_Config) -> + %% Real world values + Shrink = fun(Term) -> truncate:term(Term, {1, {1000, 100, 50, 5}}) end, + TestSize = fun(Term) -> + true = 5000000 < size(term_to_binary(Term)), + true = 500000 > size(term_to_binary(Shrink(Term))) + end, + TestSize(lists:seq(1, 5000000)), + TestSize(recursive_list(1000, 10)), + TestSize(recursive_list(5000, 20)), + TestSize(gb_sets:from_list([I || I <- lists:seq(1, 1000000)])), + TestSize(gb_trees:from_orddict([{I, I} || I <- lists:seq(1, 1000000)])), + ok. + +recursive_list(S, 0) -> lists:seq(1, S); +recursive_list(S, N) -> [recursive_list(S div N, N-1) || _ <- lists:seq(1, S)]. 
diff --git a/test/vm_memory_monitor_SUITE.erl b/test/vm_memory_monitor_SUITE.erl new file mode 100644 index 000000000000..195fff756758 --- /dev/null +++ b/test/vm_memory_monitor_SUITE.erl @@ -0,0 +1,42 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. +%% + +-module(vm_memory_monitor_SUITE). + +-include_lib("common_test/include/ct.hrl"). + +-compile(export_all). + +all() -> + [ + parse_line_linux + ]. + +%% --------------------------------------------------------------------------- +%% Test cases +%% --------------------------------------------------------------------------- + +parse_line_linux(_Config) -> + lists:foreach(fun ({S, {K, V}}) -> + {K, V} = vm_memory_monitor:parse_line_linux(S) + end, + [{"MemTotal: 0 kB", {'MemTotal', 0}}, + {"MemTotal: 502968 kB ", {'MemTotal', 515039232}}, + {"MemFree: 178232 kB", {'MemFree', 182509568}}, + {"MemTotal: 50296888", {'MemTotal', 50296888}}, + {"MemTotal 502968 kB", {'MemTotal', 515039232}}, + {"MemTotal 50296866 ", {'MemTotal', 50296866}}]), + ok. From ed6588bd1ea2c31b029d7ee510ae7df1ed0da62e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Thu, 7 Apr 2016 15:16:40 +0200 Subject: [PATCH 111/174] Squash several small testsuites in unit_SUITE and unit_inbroker_SUITE This allows to run most of the testcases in parallel. 
As the name of the new testsuites suggests, testcases are grouped by the fact they must run inside a running broker or not. References #725. [#116526487] --- test/credit_flow_SUITE.erl | 77 ------- test/dummy_supervisor2.erl | 41 ++++ test/password_hashing_SUITE.erl | 121 ---------- test/resource_monitor_SUITE.erl | 64 ------ test/supervisor2_SUITE.erl | 90 -------- test/truncate_SUITE.erl | 99 -------- test/unit_SUITE.erl | 212 ++++++++++++++++++ ...eout_SUITE.erl => unit_inbroker_SUITE.erl} | 161 ++++++++++++- test/vm_memory_monitor_SUITE.erl | 42 ---- 9 files changed, 404 insertions(+), 503 deletions(-) delete mode 100644 test/credit_flow_SUITE.erl create mode 100644 test/dummy_supervisor2.erl delete mode 100644 test/password_hashing_SUITE.erl delete mode 100644 test/resource_monitor_SUITE.erl delete mode 100644 test/supervisor2_SUITE.erl delete mode 100644 test/truncate_SUITE.erl create mode 100644 test/unit_SUITE.erl rename test/{rabbit_ctl_timeout_SUITE.erl => unit_inbroker_SUITE.erl} (55%) delete mode 100644 test/vm_memory_monitor_SUITE.erl diff --git a/test/credit_flow_SUITE.erl b/test/credit_flow_SUITE.erl deleted file mode 100644 index 5747f8c8d8f7..000000000000 --- a/test/credit_flow_SUITE.erl +++ /dev/null @@ -1,77 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developer of the Original Code is GoPivotal, Inc. -%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved. -%% - --module(credit_flow_SUITE). 
- --include_lib("common_test/include/ct.hrl"). - --compile(export_all). - -all() -> - [ - credit_flow_settings - ]. - -%% ------------------------------------------------------------------- -%% Testsuite setup/teardown. -%% ------------------------------------------------------------------- - -init_per_suite(Config) -> - rabbit_ct_helpers:log_environment(), - rabbit_ct_helpers:run_setup_steps(?MODULE, Config). - -end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config). - -%% --------------------------------------------------------------------------- -%% Test cases -%% --------------------------------------------------------------------------- - -credit_flow_settings(Config) -> - passed = rabbit_ct_broker_helpers:run_test_on_broker( - ?config(rmq_nodename, Config), - ?MODULE, credit_flow_settings1, [Config]). - -credit_flow_settings1(_Config) -> - %% default values - passed = test_proc(200, 50), - - application:set_env(rabbit, credit_flow_default_credit, {100, 20}), - passed = test_proc(100, 20), - - application:unset_env(rabbit, credit_flow_default_credit), - - % back to defaults - passed = test_proc(200, 50), - passed. - -test_proc(InitialCredit, MoreCreditAfter) -> - Pid = spawn(fun dummy/0), - Pid ! {credit, self()}, - {InitialCredit, MoreCreditAfter} = - receive - {credit, Val} -> Val - end, - passed. - -dummy() -> - credit_flow:send(self()), - receive - {credit, From} -> - From ! {credit, get(credit_flow_default_credit)}; - _ -> - dummy() - end. diff --git a/test/dummy_supervisor2.erl b/test/dummy_supervisor2.erl new file mode 100644 index 000000000000..9ca3f6329c0b --- /dev/null +++ b/test/dummy_supervisor2.erl @@ -0,0 +1,41 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. 
You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2011-2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(dummy_supervisor2). + +-behaviour(supervisor2). + +-export([ + start_link/0, + init/1 + ]). + +start_link() -> + Pid = spawn_link(fun () -> + process_flag(trap_exit, true), + receive stop -> ok end + end), + {ok, Pid}. + +init([Timeout]) -> + {ok, {{one_for_one, 0, 1}, + [{test_sup, {supervisor2, start_link, + [{local, ?MODULE}, ?MODULE, []]}, + transient, Timeout, supervisor, [?MODULE]}]}}; +init([]) -> + {ok, {{simple_one_for_one, 0, 1}, + [{test_worker, {?MODULE, start_link, []}, + temporary, 1000, worker, [?MODULE]}]}}. diff --git a/test/password_hashing_SUITE.erl b/test/password_hashing_SUITE.erl deleted file mode 100644 index 50f0f4e6d1d4..000000000000 --- a/test/password_hashing_SUITE.erl +++ /dev/null @@ -1,121 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developer of the Original Code is GoPivotal, Inc. -%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved. -%% - --module(password_hashing_SUITE). 
- --include_lib("common_test/include/ct.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - --compile(export_all). - -all() -> - [ - password_hashing, - change_password - ]. - -%% ------------------------------------------------------------------- -%% Testsuite setup/teardown. -%% ------------------------------------------------------------------- - -init_per_suite(Config) -> - rabbit_ct_helpers:log_environment(), - rabbit_ct_helpers:run_setup_steps(?MODULE, Config). - -end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config). - -%% --------------------------------------------------------------------------- -%% Test cases -%% --------------------------------------------------------------------------- - -password_hashing(Config) -> - passed = rabbit_ct_broker_helpers:run_test_on_broker( - ?config(rmq_nodename, Config), - ?MODULE, password_hashing1, [Config]). - -password_hashing1(_Config) -> - rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(), - application:set_env(rabbit, password_hashing_module, - rabbit_password_hashing_md5), - rabbit_password_hashing_md5 = rabbit_password:hashing_mod(), - application:set_env(rabbit, password_hashing_module, - rabbit_password_hashing_sha256), - rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(), - - rabbit_password_hashing_sha256 = - rabbit_password:hashing_mod(rabbit_password_hashing_sha256), - rabbit_password_hashing_md5 = - rabbit_password:hashing_mod(rabbit_password_hashing_md5), - rabbit_password_hashing_md5 = - rabbit_password:hashing_mod(undefined), - - rabbit_password_hashing_md5 = - rabbit_auth_backend_internal:hashing_module_for_user( - #internal_user{}), - rabbit_password_hashing_md5 = - rabbit_auth_backend_internal:hashing_module_for_user( - #internal_user{ - hashing_algorithm = undefined - }), - rabbit_password_hashing_md5 = - rabbit_auth_backend_internal:hashing_module_for_user( - #internal_user{ - hashing_algorithm = rabbit_password_hashing_md5 - }), - - 
rabbit_password_hashing_sha256 = - rabbit_auth_backend_internal:hashing_module_for_user( - #internal_user{ - hashing_algorithm = rabbit_password_hashing_sha256 - }), - - passed. - -change_password(Config) -> - passed = rabbit_ct_broker_helpers:run_test_on_broker( - ?config(rmq_nodename, Config), - ?MODULE, change_password1, [Config]). - -change_password1(_Config) -> - UserName = <<"test_user">>, - Password = <<"test_password">>, - case rabbit_auth_backend_internal:lookup_user(UserName) of - {ok, _} -> rabbit_auth_backend_internal:delete_user(UserName); - _ -> ok - end, - ok = application:set_env(rabbit, password_hashing_module, - rabbit_password_hashing_md5), - ok = rabbit_auth_backend_internal:add_user(UserName, Password), - {ok, #auth_user{username = UserName}} = - rabbit_auth_backend_internal:user_login_authentication( - UserName, [{password, Password}]), - ok = application:set_env(rabbit, password_hashing_module, - rabbit_password_hashing_sha256), - {ok, #auth_user{username = UserName}} = - rabbit_auth_backend_internal:user_login_authentication( - UserName, [{password, Password}]), - - NewPassword = <<"test_password1">>, - ok = rabbit_auth_backend_internal:change_password(UserName, NewPassword), - {ok, #auth_user{username = UserName}} = - rabbit_auth_backend_internal:user_login_authentication( - UserName, [{password, NewPassword}]), - - {refused, _, [UserName]} = - rabbit_auth_backend_internal:user_login_authentication( - UserName, [{password, Password}]), - passed. diff --git a/test/resource_monitor_SUITE.erl b/test/resource_monitor_SUITE.erl deleted file mode 100644 index 378b75cc1a58..000000000000 --- a/test/resource_monitor_SUITE.erl +++ /dev/null @@ -1,64 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. 
You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developer of the Original Code is GoPivotal, Inc. -%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. -%% - --module(resource_monitor_SUITE). - --include_lib("common_test/include/ct.hrl"). - --compile(export_all). - -all() -> - [ - parse_information_unit - ]. - -%% --------------------------------------------------------------------------- -%% Tests -%% --------------------------------------------------------------------------- - -parse_information_unit(_Config) -> - lists:foreach(fun ({S, V}) -> - V = rabbit_resource_monitor_misc:parse_information_unit(S) - end, - [ - {"1000", {ok, 1000}}, - - {"10kB", {ok, 10000}}, - {"10MB", {ok, 10000000}}, - {"10GB", {ok, 10000000000}}, - - {"10kiB", {ok, 10240}}, - {"10MiB", {ok, 10485760}}, - {"10GiB", {ok, 10737418240}}, - - {"10k", {ok, 10240}}, - {"10M", {ok, 10485760}}, - {"10G", {ok, 10737418240}}, - - {"10KB", {ok, 10000}}, - {"10K", {ok, 10240}}, - {"10m", {ok, 10485760}}, - {"10Mb", {ok, 10000000}}, - - {"0MB", {ok, 0}}, - - {"10 k", {error, parse_error}}, - {"MB", {error, parse_error}}, - {"", {error, parse_error}}, - {"0.5GB", {error, parse_error}}, - {"10TB", {error, parse_error}} - ]), - passed. diff --git a/test/supervisor2_SUITE.erl b/test/supervisor2_SUITE.erl deleted file mode 100644 index e2b95146c0ee..000000000000 --- a/test/supervisor2_SUITE.erl +++ /dev/null @@ -1,90 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. 
You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developer of the Original Code is GoPivotal, Inc. -%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved. -%% - --module(supervisor2_SUITE). - --behaviour(supervisor2). - --include_lib("common_test/include/ct.hrl"). - --compile(export_all). - -all() -> - [ - check_shutdown_stop, - check_shutdown_ignored - ]. - -check_shutdown_stop(_Config) -> - ok = check_shutdown(stop, 200, 200, 2000). - -check_shutdown_ignored(_Config) -> - ok = check_shutdown(ignored, 1, 2, 2000). - -check_shutdown(SigStop, Iterations, ChildCount, SupTimeout) -> - {ok, Sup} = supervisor2:start_link(?MODULE, [SupTimeout]), - Res = lists:foldl( - fun (I, ok) -> - TestSupPid = erlang:whereis(?MODULE), - ChildPids = - [begin - {ok, ChildPid} = - supervisor2:start_child(TestSupPid, []), - ChildPid - end || _ <- lists:seq(1, ChildCount)], - MRef = erlang:monitor(process, TestSupPid), - [P ! SigStop || P <- ChildPids], - ok = supervisor2:terminate_child(Sup, test_sup), - {ok, _} = supervisor2:restart_child(Sup, test_sup), - receive - {'DOWN', MRef, process, TestSupPid, shutdown} -> - ok; - {'DOWN', MRef, process, TestSupPid, Reason} -> - {error, {I, Reason}} - end; - (_, R) -> - R - end, ok, lists:seq(1, Iterations)), - unlink(Sup), - MSupRef = erlang:monitor(process, Sup), - exit(Sup, shutdown), - receive - {'DOWN', MSupRef, process, Sup, _Reason} -> - ok - end, - Res. - -%% ------------------------------------------------------------------- -%% supervisor2 behavior callbacks. 
-%% ------------------------------------------------------------------- - -start_link() -> - Pid = spawn_link(fun () -> - process_flag(trap_exit, true), - receive stop -> ok end - end), - {ok, Pid}. - -init([Timeout]) -> - {ok, {{one_for_one, 0, 1}, - [{test_sup, {supervisor2, start_link, - [{local, ?MODULE}, ?MODULE, []]}, - transient, Timeout, supervisor, [?MODULE]}]}}; -init([]) -> - {ok, {{simple_one_for_one, 0, 1}, - [{test_worker, {?MODULE, start_link, []}, - temporary, 1000, worker, [?MODULE]}]}}. - diff --git a/test/truncate_SUITE.erl b/test/truncate_SUITE.erl deleted file mode 100644 index 6c78be726de9..000000000000 --- a/test/truncate_SUITE.erl +++ /dev/null @@ -1,99 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License -%% at http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See -%% the License for the specific language governing rights and -%% limitations under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developer of the Original Code is GoPivotal, Inc. -%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. -%% - --module(truncate_SUITE). - --include_lib("common_test/include/ct.hrl"). - --compile(export_all). - -all() -> - [ - {group, all_tests} - ]. - -groups() -> - [ - {all_tests, [parallel], [ - short_examples_exactly, - term_limit, - large_examples_for_size - ]} - ]. - -init_per_group(_, Config) -> Config. -end_per_group(_, Config) -> Config. 
- -short_examples_exactly(_Config) -> - F = fun (Term, Exp) -> - Exp = truncate:term(Term, {1, {10, 10, 5, 5}}), - Term = truncate:term(Term, {100000, {10, 10, 5, 5}}) - end, - FSmall = fun (Term, Exp) -> - Exp = truncate:term(Term, {1, {2, 2, 2, 2}}), - Term = truncate:term(Term, {100000, {2, 2, 2, 2}}) - end, - F([], []), - F("h", "h"), - F("hello world", "hello w..."), - F([[h,e,l,l,o,' ',w,o,r,l,d]], [[h,e,l,l,o,'...']]), - F([a|b], [a|b]), - F(<<"hello">>, <<"hello">>), - F([<<"hello world">>], [<<"he...">>]), - F(<<1:1>>, <<1:1>>), - F(<<1:81>>, <<0:56, "...">>), - F({{{{a}}},{b},c,d,e,f,g,h,i,j,k}, {{{'...'}},{b},c,d,e,f,g,h,i,j,'...'}), - FSmall({a,30,40,40,40,40}, {a,30,'...'}), - FSmall([a,30,40,40,40,40], [a,30,'...']), - P = spawn(fun() -> receive die -> ok end end), - F([0, 0.0, <<1:1>>, F, P], [0, 0.0, <<1:1>>, F, P]), - P ! die, - R = make_ref(), - F([R], [R]), - ok. - -term_limit(_Config) -> - W = erlang:system_info(wordsize), - S = <<"abc">>, - 1 = truncate:term_size(S, 4, W), - limit_exceeded = truncate:term_size(S, 3, W), - case 100 - truncate:term_size([S, S], 100, W) of - 22 -> ok; %% 32 bit - 38 -> ok %% 64 bit - end, - case 100 - truncate:term_size([S, [S]], 100, W) of - 30 -> ok; %% ditto - 54 -> ok - end, - limit_exceeded = truncate:term_size([S, S], 6, W), - ok. - -large_examples_for_size(_Config) -> - %% Real world values - Shrink = fun(Term) -> truncate:term(Term, {1, {1000, 100, 50, 5}}) end, - TestSize = fun(Term) -> - true = 5000000 < size(term_to_binary(Term)), - true = 500000 > size(term_to_binary(Shrink(Term))) - end, - TestSize(lists:seq(1, 5000000)), - TestSize(recursive_list(1000, 10)), - TestSize(recursive_list(5000, 20)), - TestSize(gb_sets:from_list([I || I <- lists:seq(1, 1000000)])), - TestSize(gb_trees:from_orddict([{I, I} || I <- lists:seq(1, 1000000)])), - ok. - -recursive_list(S, 0) -> lists:seq(1, S); -recursive_list(S, N) -> [recursive_list(S div N, N-1) || _ <- lists:seq(1, S)]. 
diff --git a/test/unit_SUITE.erl b/test/unit_SUITE.erl new file mode 100644 index 000000000000..eddcba5803a4 --- /dev/null +++ b/test/unit_SUITE.erl @@ -0,0 +1,212 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2011-2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(unit_SUITE). + +-include_lib("common_test/include/ct.hrl"). + +-compile(export_all). + +all() -> + [ + {group, parallel_tests} + ]. + +groups() -> + [ + {parallel_tests, [parallel], [ + {resource_monitor, [parallel], [ + parse_information_unit + ]}, + {supervisor2, [], [ + check_shutdown_stop, + check_shutdown_ignored + ]}, + {truncate, [parallel], [ + short_examples_exactly, + term_limit, + large_examples_for_size + ]}, + {vm_memory_monitor, [parallel], [ + parse_line_linux + ]} + ]} + ]. + +init_per_group(_, Config) -> Config. +end_per_group(_, Config) -> Config. + +%% --------------------------------------------------------------------------- +%% rabbit_resource_monitor. 
+%% --------------------------------------------------------------------------- + +parse_information_unit(_Config) -> + lists:foreach(fun ({S, V}) -> + V = rabbit_resource_monitor_misc:parse_information_unit(S) + end, + [ + {"1000", {ok, 1000}}, + + {"10kB", {ok, 10000}}, + {"10MB", {ok, 10000000}}, + {"10GB", {ok, 10000000000}}, + + {"10kiB", {ok, 10240}}, + {"10MiB", {ok, 10485760}}, + {"10GiB", {ok, 10737418240}}, + + {"10k", {ok, 10240}}, + {"10M", {ok, 10485760}}, + {"10G", {ok, 10737418240}}, + + {"10KB", {ok, 10000}}, + {"10K", {ok, 10240}}, + {"10m", {ok, 10485760}}, + {"10Mb", {ok, 10000000}}, + + {"0MB", {ok, 0}}, + + {"10 k", {error, parse_error}}, + {"MB", {error, parse_error}}, + {"", {error, parse_error}}, + {"0.5GB", {error, parse_error}}, + {"10TB", {error, parse_error}} + ]), + passed. + +%% --------------------------------------------------------------------------- +%% supervisor2. +%% --------------------------------------------------------------------------- + +check_shutdown_stop(_Config) -> + ok = check_shutdown(stop, 200, 200, 2000). + +check_shutdown_ignored(_Config) -> + ok = check_shutdown(ignored, 1, 2, 2000). + +check_shutdown(SigStop, Iterations, ChildCount, SupTimeout) -> + {ok, Sup} = supervisor2:start_link(dummy_supervisor2, [SupTimeout]), + Res = lists:foldl( + fun (I, ok) -> + TestSupPid = erlang:whereis(dummy_supervisor2), + ChildPids = + [begin + {ok, ChildPid} = + supervisor2:start_child(TestSupPid, []), + ChildPid + end || _ <- lists:seq(1, ChildCount)], + MRef = erlang:monitor(process, TestSupPid), + [P ! 
SigStop || P <- ChildPids], + ok = supervisor2:terminate_child(Sup, test_sup), + {ok, _} = supervisor2:restart_child(Sup, test_sup), + receive + {'DOWN', MRef, process, TestSupPid, shutdown} -> + ok; + {'DOWN', MRef, process, TestSupPid, Reason} -> + {error, {I, Reason}} + end; + (_, R) -> + R + end, ok, lists:seq(1, Iterations)), + unlink(Sup), + MSupRef = erlang:monitor(process, Sup), + exit(Sup, shutdown), + receive + {'DOWN', MSupRef, process, Sup, _Reason} -> + ok + end, + Res. + +%% --------------------------------------------------------------------------- +%% truncate. +%% --------------------------------------------------------------------------- + +short_examples_exactly(_Config) -> + F = fun (Term, Exp) -> + Exp = truncate:term(Term, {1, {10, 10, 5, 5}}), + Term = truncate:term(Term, {100000, {10, 10, 5, 5}}) + end, + FSmall = fun (Term, Exp) -> + Exp = truncate:term(Term, {1, {2, 2, 2, 2}}), + Term = truncate:term(Term, {100000, {2, 2, 2, 2}}) + end, + F([], []), + F("h", "h"), + F("hello world", "hello w..."), + F([[h,e,l,l,o,' ',w,o,r,l,d]], [[h,e,l,l,o,'...']]), + F([a|b], [a|b]), + F(<<"hello">>, <<"hello">>), + F([<<"hello world">>], [<<"he...">>]), + F(<<1:1>>, <<1:1>>), + F(<<1:81>>, <<0:56, "...">>), + F({{{{a}}},{b},c,d,e,f,g,h,i,j,k}, {{{'...'}},{b},c,d,e,f,g,h,i,j,'...'}), + FSmall({a,30,40,40,40,40}, {a,30,'...'}), + FSmall([a,30,40,40,40,40], [a,30,'...']), + P = spawn(fun() -> receive die -> ok end end), + F([0, 0.0, <<1:1>>, F, P], [0, 0.0, <<1:1>>, F, P]), + P ! die, + R = make_ref(), + F([R], [R]), + ok. + +term_limit(_Config) -> + W = erlang:system_info(wordsize), + S = <<"abc">>, + 1 = truncate:term_size(S, 4, W), + limit_exceeded = truncate:term_size(S, 3, W), + case 100 - truncate:term_size([S, S], 100, W) of + 22 -> ok; %% 32 bit + 38 -> ok %% 64 bit + end, + case 100 - truncate:term_size([S, [S]], 100, W) of + 30 -> ok; %% ditto + 54 -> ok + end, + limit_exceeded = truncate:term_size([S, S], 6, W), + ok. 
+ +large_examples_for_size(_Config) -> + %% Real world values + Shrink = fun(Term) -> truncate:term(Term, {1, {1000, 100, 50, 5}}) end, + TestSize = fun(Term) -> + true = 5000000 < size(term_to_binary(Term)), + true = 500000 > size(term_to_binary(Shrink(Term))) + end, + TestSize(lists:seq(1, 5000000)), + TestSize(recursive_list(1000, 10)), + TestSize(recursive_list(5000, 20)), + TestSize(gb_sets:from_list([I || I <- lists:seq(1, 1000000)])), + TestSize(gb_trees:from_orddict([{I, I} || I <- lists:seq(1, 1000000)])), + ok. + +recursive_list(S, 0) -> lists:seq(1, S); +recursive_list(S, N) -> [recursive_list(S div N, N-1) || _ <- lists:seq(1, S)]. + +%% --------------------------------------------------------------------------- +%% vm_memory_monitor. +%% --------------------------------------------------------------------------- + +parse_line_linux(_Config) -> + lists:foreach(fun ({S, {K, V}}) -> + {K, V} = vm_memory_monitor:parse_line_linux(S) + end, + [{"MemTotal: 0 kB", {'MemTotal', 0}}, + {"MemTotal: 502968 kB ", {'MemTotal', 515039232}}, + {"MemFree: 178232 kB", {'MemFree', 182509568}}, + {"MemTotal: 50296888", {'MemTotal', 50296888}}, + {"MemTotal 502968 kB", {'MemTotal', 515039232}}, + {"MemTotal 50296866 ", {'MemTotal', 50296866}}]), + ok. diff --git a/test/rabbit_ctl_timeout_SUITE.erl b/test/unit_inbroker_SUITE.erl similarity index 55% rename from test/rabbit_ctl_timeout_SUITE.erl rename to test/unit_inbroker_SUITE.erl index 92b2867a5426..4b58221727b3 100644 --- a/test/rabbit_ctl_timeout_SUITE.erl +++ b/test/unit_inbroker_SUITE.erl @@ -1,20 +1,20 @@ %% The contents of this file are subject to the Mozilla Public License %% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License -%% at http://www.mozilla.org/MPL/ +%% compliance with the License. 
You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ %% %% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See -%% the License for the specific language governing rights and -%% limitations under the License. +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. %% %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is GoPivotal, Inc. -%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. +%% Copyright (c) 2011-2016 Pivotal Software, Inc. All rights reserved. %% --module(rabbit_ctl_timeout_SUITE). +-module(unit_inbroker_SUITE). -include_lib("common_test/include/ct.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). @@ -25,7 +25,23 @@ all() -> [ - list_operations_timeout_pass + {group, parallel_tests} + ]. + +groups() -> + [ + {parallel_tests, [parallel], [ + {credit_flow, [parallel], [ + credit_flow_settings + ]}, + {password_hashing, [parallel], [ + password_hashing, + change_password + ]}, + {rabbitmqctl, [parallel], [ + list_operations_timeout_pass + ]} + ]} ]. %% ------------------------------------------------------------------- @@ -39,12 +55,137 @@ init_per_suite(Config) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). +init_per_group(_, Config) -> Config. +end_per_group(_, Config) -> Config. + +%% --------------------------------------------------------------------------- +%% Credit flow. +%% --------------------------------------------------------------------------- + +credit_flow_settings(Config) -> + passed = rabbit_ct_broker_helpers:run_on_broker( + ?config(rmq_nodename, Config), + ?MODULE, credit_flow_settings1, [Config]). 
+ +credit_flow_settings1(_Config) -> + %% default values + passed = test_proc(200, 50), + + application:set_env(rabbit, credit_flow_default_credit, {100, 20}), + passed = test_proc(100, 20), + + application:unset_env(rabbit, credit_flow_default_credit), + + % back to defaults + passed = test_proc(200, 50), + passed. + +test_proc(InitialCredit, MoreCreditAfter) -> + Pid = spawn(fun dummy/0), + Pid ! {credit, self()}, + {InitialCredit, MoreCreditAfter} = + receive + {credit, Val} -> Val + end, + passed. + +dummy() -> + credit_flow:send(self()), + receive + {credit, From} -> + From ! {credit, get(credit_flow_default_credit)}; + _ -> + dummy() + end. + +%% --------------------------------------------------------------------------- +%% Password hashing. +%% --------------------------------------------------------------------------- + +password_hashing(Config) -> + passed = rabbit_ct_broker_helpers:run_on_broker( + ?config(rmq_nodename, Config), + ?MODULE, password_hashing1, [Config]). + +password_hashing1(_Config) -> + rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(), + application:set_env(rabbit, password_hashing_module, + rabbit_password_hashing_md5), + rabbit_password_hashing_md5 = rabbit_password:hashing_mod(), + application:set_env(rabbit, password_hashing_module, + rabbit_password_hashing_sha256), + rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(), + + rabbit_password_hashing_sha256 = + rabbit_password:hashing_mod(rabbit_password_hashing_sha256), + rabbit_password_hashing_md5 = + rabbit_password:hashing_mod(rabbit_password_hashing_md5), + rabbit_password_hashing_md5 = + rabbit_password:hashing_mod(undefined), + + rabbit_password_hashing_md5 = + rabbit_auth_backend_internal:hashing_module_for_user( + #internal_user{}), + rabbit_password_hashing_md5 = + rabbit_auth_backend_internal:hashing_module_for_user( + #internal_user{ + hashing_algorithm = undefined + }), + rabbit_password_hashing_md5 = + 
rabbit_auth_backend_internal:hashing_module_for_user( + #internal_user{ + hashing_algorithm = rabbit_password_hashing_md5 + }), + + rabbit_password_hashing_sha256 = + rabbit_auth_backend_internal:hashing_module_for_user( + #internal_user{ + hashing_algorithm = rabbit_password_hashing_sha256 + }), + + passed. + +change_password(Config) -> + passed = rabbit_ct_broker_helpers:run_on_broker( + ?config(rmq_nodename, Config), + ?MODULE, change_password1, [Config]). + +change_password1(_Config) -> + UserName = <<"test_user">>, + Password = <<"test_password">>, + case rabbit_auth_backend_internal:lookup_user(UserName) of + {ok, _} -> rabbit_auth_backend_internal:delete_user(UserName); + _ -> ok + end, + ok = application:set_env(rabbit, password_hashing_module, + rabbit_password_hashing_md5), + ok = rabbit_auth_backend_internal:add_user(UserName, Password), + {ok, #auth_user{username = UserName}} = + rabbit_auth_backend_internal:user_login_authentication( + UserName, [{password, Password}]), + ok = application:set_env(rabbit, password_hashing_module, + rabbit_password_hashing_sha256), + {ok, #auth_user{username = UserName}} = + rabbit_auth_backend_internal:user_login_authentication( + UserName, [{password, Password}]), + + NewPassword = <<"test_password1">>, + ok = rabbit_auth_backend_internal:change_password(UserName, NewPassword), + {ok, #auth_user{username = UserName}} = + rabbit_auth_backend_internal:user_login_authentication( + UserName, [{password, NewPassword}]), + + {refused, _, [UserName]} = + rabbit_auth_backend_internal:user_login_authentication( + UserName, [{password, Password}]), + passed. + %% ------------------------------------------------------------------- -%% Test cases. +%% rabbitmqctl. 
%% ------------------------------------------------------------------- list_operations_timeout_pass(Config) -> - passed = rabbit_ct_broker_helpers:run_test_on_broker( + passed = rabbit_ct_broker_helpers:run_on_broker( ?config(rmq_nodename, Config), ?MODULE, list_operations_timeout_pass1, [Config]). diff --git a/test/vm_memory_monitor_SUITE.erl b/test/vm_memory_monitor_SUITE.erl deleted file mode 100644 index 195fff756758..000000000000 --- a/test/vm_memory_monitor_SUITE.erl +++ /dev/null @@ -1,42 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developer of the Original Code is GoPivotal, Inc. -%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. -%% - --module(vm_memory_monitor_SUITE). - --include_lib("common_test/include/ct.hrl"). - --compile(export_all). - -all() -> - [ - parse_line_linux - ]. - -%% --------------------------------------------------------------------------- -%% Test cases -%% --------------------------------------------------------------------------- - -parse_line_linux(_Config) -> - lists:foreach(fun ({S, {K, V}}) -> - {K, V} = vm_memory_monitor:parse_line_linux(S) - end, - [{"MemTotal: 0 kB", {'MemTotal', 0}}, - {"MemTotal: 502968 kB ", {'MemTotal', 515039232}}, - {"MemFree: 178232 kB", {'MemFree', 182509568}}, - {"MemTotal: 50296888", {'MemTotal', 50296888}}, - {"MemTotal 502968 kB", {'MemTotal', 515039232}}, - {"MemTotal 50296866 ", {'MemTotal', 50296866}}]), - ok. 
From 0aee9e647bf24d69044d06d79cadca466ccfbf00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Thu, 7 Apr 2016 15:30:17 +0200 Subject: [PATCH 112/174] Makefile: Remove rabbitmq_test as a TEST_DEPS When this branch gets merged, rabbitmq-components.mk should be fixed instead. References #725. [#116526487] --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index 266a4ed13ddc..6703f74047c1 100644 --- a/Makefile +++ b/Makefile @@ -66,6 +66,9 @@ DEPS += $(DISTRIBUTED_DEPS) endif endif +# FIXME: Remove rabbitmq_test as TEST_DEPS from here for now. +TEST_DEPS := $(filter-out rabbitmq_test,$(TEST_DEPS)) + include erlang.mk # -------------------------------------------------------------------- From 2c9d5c452e1863bcab2c110ad7a16dbc82e04473 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Thu, 21 Apr 2016 18:22:26 +0200 Subject: [PATCH 113/174] Makefile: Add amqp_client and meck to TEST_DEPS References #725. [#116526487] --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6703f74047c1..d8761c022ffc 100644 --- a/Makefile +++ b/Makefile @@ -67,7 +67,7 @@ endif endif # FIXME: Remove rabbitmq_test as TEST_DEPS from here for now. -TEST_DEPS := $(filter-out rabbitmq_test,$(TEST_DEPS)) +TEST_DEPS := amqp_client meck $(filter-out rabbitmq_test,$(TEST_DEPS)) include erlang.mk From 662214d3e062635a9a281c3d72b91a0e2182a908 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Thu, 7 Apr 2016 15:53:09 +0200 Subject: [PATCH 114/174] Update erlang.mk In particular, this brings support for `make ct-$suite t=...`. References #725. 
[#116526487] --- build.config | 43 ----- erlang.mk | 495 ++++++++++++++++++++++++++++++--------------------- 2 files changed, 290 insertions(+), 248 deletions(-) delete mode 100644 build.config diff --git a/build.config b/build.config deleted file mode 100644 index b1430689a182..000000000000 --- a/build.config +++ /dev/null @@ -1,43 +0,0 @@ -# Do *not* comment or remove core modules -# unless you know what you are doing. -# -# Feel free to comment plugins out however. - -# Core modules. -core/core -index/* -core/index -core/deps - -# Plugins that must run before Erlang code gets compiled. -plugins/erlydtl -plugins/protobuffs - -# Core modules, continued. -core/erlc -core/docs -core/rel -core/test -core/compat - -# Plugins. -plugins/asciidoc -plugins/bootstrap -plugins/c_src -plugins/ci -plugins/ct -plugins/dialyzer -# plugins/edoc -plugins/elvis -plugins/escript -plugins/eunit -plugins/relx -plugins/shell -plugins/triq -plugins/xref - -# Plugins enhancing the functionality of other plugins. -plugins/cover - -# Core modules which can use variables from plugins. -core/deps-tools diff --git a/erlang.mk b/erlang.mk index fc2d806f2e82..efbcf5cd11a5 100644 --- a/erlang.mk +++ b/erlang.mk @@ -16,7 +16,7 @@ ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST))) -ERLANG_MK_VERSION = 2.0.0-pre.2-16-gb52203c-dirty +ERLANG_MK_VERSION = 2.0.0-pre.2-76-g427cfb8 # Core configuration. 
@@ -84,7 +84,7 @@ all:: deps app rel rel:: $(verbose) : -check:: clean app tests +check:: tests clean:: clean-crashdump @@ -421,6 +421,14 @@ pkg_boss_db_fetch = git pkg_boss_db_repo = https://github.com/ErlyORM/boss_db pkg_boss_db_commit = master +PACKAGES += brod +pkg_brod_name = brod +pkg_brod_description = Kafka client in Erlang +pkg_brod_homepage = https://github.com/klarna/brod +pkg_brod_fetch = git +pkg_brod_repo = https://github.com/klarna/brod.git +pkg_brod_commit = master + PACKAGES += bson pkg_bson_name = bson pkg_bson_description = BSON documents in Erlang, see bsonspec.org @@ -885,14 +893,6 @@ pkg_dh_date_fetch = git pkg_dh_date_repo = https://github.com/daleharvey/dh_date pkg_dh_date_commit = master -PACKAGES += dhtcrawler -pkg_dhtcrawler_name = dhtcrawler -pkg_dhtcrawler_description = dhtcrawler is a DHT crawler written in erlang. It can join a DHT network and crawl many P2P torrents. -pkg_dhtcrawler_homepage = https://github.com/kevinlynx/dhtcrawler -pkg_dhtcrawler_fetch = git -pkg_dhtcrawler_repo = https://github.com/kevinlynx/dhtcrawler -pkg_dhtcrawler_commit = master - PACKAGES += dirbusterl pkg_dirbusterl_name = dirbusterl pkg_dirbusterl_description = DirBuster successor in Erlang @@ -1139,7 +1139,7 @@ pkg_elvis_description = Erlang Style Reviewer pkg_elvis_homepage = https://github.com/inaka/elvis pkg_elvis_fetch = git pkg_elvis_repo = https://github.com/inaka/elvis -pkg_elvis_commit = 0.2.4 +pkg_elvis_commit = master PACKAGES += emagick pkg_emagick_name = emagick @@ -1781,6 +1781,14 @@ pkg_geef_fetch = git pkg_geef_repo = https://github.com/carlosmn/geef pkg_geef_commit = master +PACKAGES += gen_coap +pkg_gen_coap_name = gen_coap +pkg_gen_coap_description = Generic Erlang CoAP Client/Server +pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap +pkg_gen_coap_fetch = git +pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap +pkg_gen_coap_commit = master + PACKAGES += gen_cycle pkg_gen_cycle_name = gen_cycle pkg_gen_cycle_description 
= Simple, generic OTP behaviour for recurring tasks @@ -1981,6 +1989,14 @@ pkg_hyper_fetch = git pkg_hyper_repo = https://github.com/GameAnalytics/hyper pkg_hyper_commit = master +PACKAGES += i18n +pkg_i18n_name = i18n +pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e) +pkg_i18n_homepage = https://github.com/erlang-unicode/i18n +pkg_i18n_fetch = git +pkg_i18n_repo = https://github.com/erlang-unicode/i18n +pkg_i18n_commit = master + PACKAGES += ibrowse pkg_ibrowse_name = ibrowse pkg_ibrowse_description = Erlang HTTP client @@ -2501,6 +2517,14 @@ pkg_merl_fetch = git pkg_merl_repo = https://github.com/richcarl/merl pkg_merl_commit = master +PACKAGES += mimerl +pkg_mimerl_name = mimerl +pkg_mimerl_description = library to handle mimetypes +pkg_mimerl_homepage = https://github.com/benoitc/mimerl +pkg_mimerl_fetch = git +pkg_mimerl_repo = https://github.com/benoitc/mimerl +pkg_mimerl_commit = master + PACKAGES += mimetypes pkg_mimetypes_name = mimetypes pkg_mimetypes_description = Erlang MIME types library @@ -2733,14 +2757,6 @@ pkg_oauth2_fetch = git pkg_oauth2_repo = https://github.com/kivra/oauth2 pkg_oauth2_commit = master -PACKAGES += oauth2c -pkg_oauth2c_name = oauth2c -pkg_oauth2c_description = Erlang OAuth2 Client -pkg_oauth2c_homepage = https://github.com/kivra/oauth2_client -pkg_oauth2c_fetch = git -pkg_oauth2c_repo = https://github.com/kivra/oauth2_client -pkg_oauth2c_commit = master - PACKAGES += octopus pkg_octopus_name = octopus pkg_octopus_description = Small and flexible pool manager written in Erlang @@ -3533,6 +3549,14 @@ pkg_stripe_fetch = git pkg_stripe_repo = https://github.com/mattsta/stripe-erlang pkg_stripe_commit = v1 +PACKAGES += supervisor3 +pkg_supervisor3_name = supervisor3 +pkg_supervisor3_description = OTP supervisor with additional strategies +pkg_supervisor3_homepage = https://github.com/klarna/supervisor3 +pkg_supervisor3_fetch = 
git +pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git +pkg_supervisor3_commit = master + PACKAGES += surrogate pkg_surrogate_name = surrogate pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes. @@ -3907,7 +3931,7 @@ pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref) pkg_xref_runner_homepage = https://github.com/inaka/xref_runner pkg_xref_runner_fetch = git pkg_xref_runner_repo = https://github.com/inaka/xref_runner -pkg_xref_runner_commit = 0.2.0 +pkg_xref_runner_commit = 0.2.3 PACKAGES += yamerl pkg_yamerl_name = yamerl @@ -4092,7 +4116,10 @@ endif # While Makefile file could be GNUmakefile or makefile, # in practice only Makefile is needed so far. define dep_autopatch - if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \ + if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \ + $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \ + $(call dep_autopatch_erlang_mk,$(1)); \ + elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \ if [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \ $(call dep_autopatch2,$(1)); \ elif [ 0 != `grep -ci rebar $(DEPS_DIR)/$(1)/Makefile` ]; then \ @@ -4100,12 +4127,7 @@ define dep_autopatch elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i rebar '{}' \;`" ]; then \ $(call dep_autopatch2,$(1)); \ else \ - if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \ - $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \ - $(call dep_autopatch_erlang_mk,$(1)); \ - else \ - $(call erlang,$(call dep_autopatch_app.erl,$(1))); \ - fi \ + $(call erlang,$(call dep_autopatch_app.erl,$(1))); \ fi \ else \ if [ ! 
-d $(DEPS_DIR)/$(1)/src/ ]; then \ @@ -4117,8 +4139,11 @@ define dep_autopatch endef define dep_autopatch2 + if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \ + $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \ + fi; \ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \ - if [ -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \ + if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \ $(call dep_autopatch_fetch_rebar); \ $(call dep_autopatch_rebar,$(1)); \ else \ @@ -4256,57 +4281,6 @@ define dep_autopatch_rebar.erl Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names])) end end(), - FindFirst = fun(F, Fd) -> - case io:parse_erl_form(Fd, undefined) of - {ok, {attribute, _, compile, {parse_transform, PT}}, _} -> - [PT, F(F, Fd)]; - {ok, {attribute, _, compile, CompileOpts}, _} when is_list(CompileOpts) -> - case proplists:get_value(parse_transform, CompileOpts) of - undefined -> [F(F, Fd)]; - PT -> [PT, F(F, Fd)] - end; - {ok, {attribute, _, include, Hrl}, _} -> - case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of - {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)]; - _ -> - case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ Hrl, [read]) of - {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)]; - _ -> [F(F, Fd)] - end - end; - {ok, {attribute, _, include_lib, "$(1)/include/" ++ Hrl}, _} -> - {ok, HrlFd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]), - [F(F, HrlFd), F(F, Fd)]; - {ok, {attribute, _, include_lib, Hrl}, _} -> - case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of - {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)]; - _ -> [F(F, Fd)] - end; - {ok, {attribute, _, import, {Imp, _}}, _} -> - case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(Imp) ++ ".erl", [read]) of - {ok, ImpFd} -> [Imp, F(F, ImpFd), F(F, Fd)]; - _ -> [F(F, Fd)] - end; 
- {eof, _} -> - file:close(Fd), - []; - _ -> - F(F, Fd) - end - end, - fun() -> - ErlFiles = filelib:wildcard("$(call core_native_path,$(DEPS_DIR)/$1/src/)*.erl"), - First0 = lists:usort(lists:flatten([begin - {ok, Fd} = file:open(F, [read]), - FindFirst(FindFirst, Fd) - end || F <- ErlFiles])), - First = lists:flatten([begin - {ok, Fd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", [read]), - FindFirst(FindFirst, Fd) - end || M <- First0, lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)]) ++ First0, - Write(["COMPILE_FIRST +=", [[" ", atom_to_list(M)] || M <- First, - lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)], "\n"]) - end(), Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"), Write("\npreprocess::\n"), Write("\npre-deps::\n"), @@ -4419,9 +4393,10 @@ define dep_autopatch_rebar.erl Output, ": $$\(foreach ext,.c .C .cc .cpp,", "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n", "\t$$\(CC) -o $$\@ $$\? 
$$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)", - case filename:extension(Output) of - [] -> "\n"; - _ -> " -shared\n" + case {filename:extension(Output), $(PLATFORM)} of + {[], _} -> "\n"; + {_, darwin} -> "\n"; + _ -> " -shared\n" end]) end, [PortSpec(S) || S <- PortSpecs] @@ -4490,6 +4465,15 @@ define dep_autopatch_app.erl halt() endef +define dep_autopatch_appsrc_script.erl + AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)", + AppSrcScript = AppSrc ++ ".script", + Bindings = erl_eval:new_bindings(), + {ok, Conf} = file:script(AppSrcScript, Bindings), + ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])), + halt() +endef + define dep_autopatch_appsrc.erl AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)", AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end, @@ -4576,10 +4560,11 @@ $(DEPS_DIR)/$(call dep_name,$1): exit 17; \ fi $(verbose) mkdir -p $(DEPS_DIR) - $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$1)),$1) - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure.ac -o -f $(DEPS_DIR)/$(DEP_NAME)/configure.in ]; then \ - echo " AUTO " $(DEP_STR); \ - cd $(DEPS_DIR)/$(DEP_NAME) && autoreconf -Wall -vif -I m4; \ + $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1)) + $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \ + && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \ + echo " AUTO " $(1); \ + cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \ fi - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \ echo " CONF " $(DEP_STR); \ @@ -4672,28 +4657,10 @@ dtl_verbose = $(dtl_verbose_$(V)) # Core targets. 
-define erlydtl_compile.erl - [begin - Module0 = case "$(strip $(DTL_FULL_PATH))" of - "" -> - filename:basename(F, ".dtl"); - _ -> - "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"), - re:replace(F2, "/", "_", [{return, list}, global]) - end, - Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"), - case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of - ok -> ok; - {ok, _} -> ok - end - end || F <- string:tokens("$(1)", " ")], - halt(). -endef - -ifneq ($(wildcard src/),) - DTL_FILES = $(sort $(call core_find,$(DTL_PATH),*.dtl)) +ifneq ($(DTL_FILES),) + ifdef DTL_FULL_PATH BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(subst /,_,$(DTL_FILES:$(DTL_PATH)%=%)))) else @@ -4701,7 +4668,7 @@ BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(notdir $(DTL_FILES endif ifneq ($(words $(DTL_FILES)),0) -# Rebuild everything when the Makefile changes. +# Rebuild templates when the Makefile changes. $(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) @mkdir -p $(ERLANG_MK_TMP) @if test -f $@; then \ @@ -4712,9 +4679,28 @@ $(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl endif -ebin/$(PROJECT).app:: $(DTL_FILES) +define erlydtl_compile.erl + [begin + Module0 = case "$(strip $(DTL_FULL_PATH))" of + "" -> + filename:basename(F, ".dtl"); + _ -> + "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"), + re:replace(F2, "/", "_", [{return, list}, global]) + end, + Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"), + case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of + ok -> ok; + {ok, _} -> ok + end + end || F <- string:tokens("$(1)", " ")], + halt(). 
+endef + +ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/ $(if $(strip $?),\ - $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?,-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/))) + $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?),-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/)) + endif # Copyright (c) 2015, Loïc Hoguin @@ -4888,51 +4874,79 @@ $(PROJECT).d:: $(XRL_FILES) $(YRL_FILES) # Erlang and Core Erlang files. define makedep.erl + E = ets:new(makedep, [bag]), + G = digraph:new([acyclic]), ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")), - Modules = [{filename:basename(F, ".erl"), F} || F <- ErlFiles], - Add = fun (Dep, Acc) -> - case lists:keyfind(atom_to_list(Dep), 1, Modules) of - {_, DepFile} -> [DepFile|Acc]; - false -> Acc + Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles], + Add = fun (Mod, Dep) -> + case lists:keyfind(Dep, 1, Modules) of + false -> ok; + {_, DepFile} -> + {_, ModFile} = lists:keyfind(Mod, 1, Modules), + ets:insert(E, {ModFile, DepFile}), + digraph:add_vertex(G, Mod), + digraph:add_vertex(G, Dep), + digraph:add_edge(G, Mod, Dep) end end, - AddHd = fun (Dep, Acc) -> - case {Dep, lists:keymember(Dep, 2, Modules)} of - {"src/" ++ _, false} -> [Dep|Acc]; - {"include/" ++ _, false} -> [Dep|Acc]; - _ -> Acc + AddHd = fun (F, Mod, DepFile) -> + case file:open(DepFile, [read]) of + {error, enoent} -> ok; + {ok, Fd} -> + F(F, Fd, Mod), + {_, ModFile} = lists:keyfind(Mod, 1, Modules), + ets:insert(E, {ModFile, DepFile}) end end, - CompileFirst = fun (Deps) -> - First0 = [case filename:extension(D) of - ".erl" -> filename:basename(D, ".erl"); - _ -> [] - end || D <- Deps], - case lists:usort(First0) of - [] -> []; - [[]] -> []; - First -> ["COMPILE_FIRST +=", [[" ", F] || F <- First], "\n"] - end + Attr = fun + (F, Mod, behavior, Dep) -> Add(Mod, Dep); + (F, Mod, behaviour, Dep) -> Add(Mod, Dep); + (F, Mod, compile, {parse_transform, Dep}) -> Add(Mod, Dep); + (F, Mod, compile, Opts) when is_list(Opts) -> + case 
proplists:get_value(parse_transform, Opts) of + undefined -> ok; + Dep -> Add(Mod, Dep) + end; + (F, Mod, include, Hrl) -> + case filelib:is_file("include/" ++ Hrl) of + true -> AddHd(F, Mod, "include/" ++ Hrl); + false -> + case filelib:is_file("src/" ++ Hrl) of + true -> AddHd(F, Mod, "src/" ++ Hrl); + false -> false + end + end; + (F, Mod, include_lib, "$1/include/" ++ Hrl) -> AddHd(F, Mod, "include/" ++ Hrl); + (F, Mod, include_lib, Hrl) -> AddHd(F, Mod, "include/" ++ Hrl); + (F, Mod, import, {Imp, _}) -> + case filelib:is_file("src/" ++ atom_to_list(Imp) ++ ".erl") of + false -> ok; + true -> Add(Mod, Imp) + end; + (_, _, _, _) -> ok end, - Depend = [begin - case epp:parse_file(F, ["include/"], []) of - {ok, Forms} -> - Deps = lists:usort(lists:foldl(fun - ({attribute, _, behavior, Dep}, Acc) -> Add(Dep, Acc); - ({attribute, _, behaviour, Dep}, Acc) -> Add(Dep, Acc); - ({attribute, _, compile, {parse_transform, Dep}}, Acc) -> Add(Dep, Acc); - ({attribute, _, file, {Dep, _}}, Acc) -> AddHd(Dep, Acc); - (_, Acc) -> Acc - end, [], Forms)), - case Deps of - [] -> ""; - _ -> [F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n", CompileFirst(Deps)] - end; - {error, enoent} -> - [] + MakeDepend = fun(F, Fd, Mod) -> + case io:parse_erl_form(Fd, undefined) of + {ok, {attribute, _, Key, Value}, _} -> + Attr(F, Mod, Key, Value), + F(F, Fd, Mod); + {eof, _} -> + file:close(Fd); + _ -> + F(F, Fd, Mod) end + end, + [begin + Mod = list_to_atom(filename:basename(F, ".erl")), + {ok, Fd} = file:open(F, [read]), + MakeDepend(MakeDepend, Fd, Mod) end || F <- ErlFiles], - ok = file:write_file("$(1)", Depend), + Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))), + CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)], + ok = file:write_file("$(1)", [ + [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend], + "\nCOMPILE_FIRST +=", [[" ", atom_to_list(CF)] || CF <- 
CompileFirst], "\n" + ]), halt() endef @@ -5069,6 +5083,11 @@ test-dir: $(call core_find,$(TEST_DIR)/,*.erl) -pa ebin/ endif +ifeq ($(wildcard src),) +test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS) +test-build:: clean deps test-deps + $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(TEST_ERLC_OPTS)" +else ifeq ($(wildcard ebin/test),) test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS) test-build:: clean deps test-deps $(PROJECT).d @@ -5086,6 +5105,7 @@ clean-test-dir: ifneq ($(wildcard $(TEST_DIR)/*.beam),) $(gen_verbose) rm -f $(TEST_DIR)/*.beam endif +endif # Copyright (c) 2015, Loïc Hoguin # This file is part of erlang.mk and subject to the terms of the ISC License. @@ -5103,11 +5123,14 @@ $(if $(filter-out -Werror,$1),\ $(shell echo $1 | cut -b 2-))) endef +define compat_erlc_opts_to_list + [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))] +endef + define compat_rebar_config {deps, [$(call comma_list,$(foreach d,$(DEPS),\ {$(call dep_name,$d),".*",{git,"$(call dep_repo,$d)","$(call dep_commit,$d)"}}))]}. -{erl_opts, [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$(ERLC_OPTS)),\ - $(call compat_convert_erlc_opts,$o)))]}. +{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}. 
endef $(eval _compat_rebar_config = $$(compat_rebar_config)) @@ -5126,12 +5149,12 @@ MAN_SECTIONS ?= 3 7 docs:: asciidoc -asciidoc: distclean-asciidoc doc-deps asciidoc-guide asciidoc-manual +asciidoc: asciidoc-guide asciidoc-manual ifeq ($(wildcard doc/src/guide/book.asciidoc),) asciidoc-guide: else -asciidoc-guide: +asciidoc-guide: distclean-asciidoc doc-deps a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/ endif @@ -5139,7 +5162,7 @@ endif ifeq ($(wildcard doc/src/manual/*.asciidoc),) asciidoc-manual: else -asciidoc-manual: +asciidoc-manual: distclean-asciidoc doc-deps for f in doc/src/manual/*.asciidoc ; do \ a2x -v -f manpage $$f ; \ done @@ -5154,7 +5177,7 @@ install-docs:: install-asciidoc install-asciidoc: asciidoc-manual for s in $(MAN_SECTIONS); do \ mkdir -p $(MAN_INSTALL_PATH)/man$$s/ ; \ - install -g 0 -o 0 -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \ + install -g `id -u` -o `id -g` -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \ done endif @@ -5214,6 +5237,8 @@ define bs_appsrc_lib ]}. endef +# To prevent autocompletion issues with ZSH, we add "include erlang.mk" +# separately during the actual bootstrap. ifdef SP define bs_Makefile PROJECT = $p @@ -5223,17 +5248,21 @@ PROJECT_VERSION = 0.0.1 # Whitespace to be used when creating files from templates. 
SP = $(SP) -include erlang.mk endef else define bs_Makefile PROJECT = $p -include erlang.mk +PROJECT_DESCRIPTION = New project +PROJECT_VERSION = 0.0.1 + endef endif define bs_apps_Makefile PROJECT = $p +PROJECT_DESCRIPTION = New project +PROJECT_VERSION = 0.0.1 + include $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)/erlang.mk endef @@ -5527,6 +5556,7 @@ endif $(eval p := $(PROJECT)) $(eval n := $(PROJECT)_sup) $(call render_template,bs_Makefile,Makefile) + $(verbose) echo "include erlang.mk" >> Makefile $(verbose) mkdir src/ ifdef LEGACY $(call render_template,bs_appsrc,src/$(PROJECT).app.src) @@ -5540,6 +5570,7 @@ ifneq ($(wildcard src/),) endif $(eval p := $(PROJECT)) $(call render_template,bs_Makefile,Makefile) + $(verbose) echo "include erlang.mk" >> Makefile $(verbose) mkdir src/ ifdef LEGACY $(call render_template,bs_appsrc_lib,src/$(PROJECT).app.src) @@ -5620,12 +5651,32 @@ list-templates: C_SRC_DIR ?= $(CURDIR)/c_src C_SRC_ENV ?= $(C_SRC_DIR)/env.mk -C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT).so +C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT) C_SRC_TYPE ?= shared # System type and C compiler/flags. -ifeq ($(PLATFORM),darwin) +ifeq ($(PLATFORM),msys2) + C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe + C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll +else + C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= + C_SRC_OUTPUT_SHARED_EXTENSION ?= .so +endif + +ifeq ($(C_SRC_TYPE),shared) + C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION) +else + C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION) +endif + +ifeq ($(PLATFORM),msys2) +# We hardcode the compiler used on MSYS2. The default CC=cc does +# not produce working code. The "gcc" MSYS2 package also doesn't. 
+ CC = /mingw64/bin/gcc + CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes + CXXFLAGS ?= -O3 -finline-functions -Wall +else ifeq ($(PLATFORM),darwin) CC ?= cc CFLAGS ?= -O3 -std=c99 -arch x86_64 -finline-functions -Wall -Wmissing-prototypes CXXFLAGS ?= -O3 -arch x86_64 -finline-functions -Wall @@ -5640,10 +5691,15 @@ else ifeq ($(PLATFORM),linux) CXXFLAGS ?= -O3 -finline-functions -Wall endif -CFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR) -CXXFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR) +ifneq ($(PLATFORM),msys2) + CFLAGS += -fPIC + CXXFLAGS += -fPIC +endif + +CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)" +CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)" -LDLIBS += -L $(ERL_INTERFACE_LIB_DIR) -lerl_interface -lei +LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lerl_interface -lei # Verbosity. @@ -5680,15 +5736,15 @@ OBJECTS = $(addsuffix .o, $(basename $(SOURCES))) COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c -app:: $(C_SRC_ENV) $(C_SRC_OUTPUT) +app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE) -test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT) +test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE) -$(C_SRC_OUTPUT): $(OBJECTS) +$(C_SRC_OUTPUT_FILE): $(OBJECTS) $(verbose) mkdir -p priv/ $(link_verbose) $(CC) $(OBJECTS) \ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \ - -o $(C_SRC_OUTPUT) + -o $(C_SRC_OUTPUT_FILE) %.o: %.c $(COMPILE_C) $(OUTPUT_OPTION) $< @@ -5705,13 +5761,13 @@ $(C_SRC_OUTPUT): $(OBJECTS) clean:: clean-c_src clean-c_src: - $(gen_verbose) rm -f $(C_SRC_OUTPUT) $(OBJECTS) + $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS) endif ifneq ($(wildcard $(C_SRC_DIR)),) $(C_SRC_ENV): - $(verbose) $(ERL) -eval "file:write_file(\"$(C_SRC_ENV)\", \ + $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \ io_lib:format( \ \"ERTS_INCLUDE_DIR ?= 
~s/erts-~s/include/~n\" \ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \ @@ -5889,7 +5945,7 @@ endif # Copyright (c) 2013-2015, Loïc Hoguin # This file is part of erlang.mk and subject to the terms of the ISC License. -.PHONY: ct distclean-ct +.PHONY: ct apps-ct distclean-ct # Configuration. @@ -5924,17 +5980,33 @@ CT_RUN = ct_run \ -logdir $(CURDIR)/logs ifeq ($(CT_SUITES),) -ct: +ct: $(if $(IS_APP),,apps-ct) else -ct: test-build +ct: test-build $(if $(IS_APP),,apps-ct) $(verbose) mkdir -p $(CURDIR)/logs/ $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS) endif +ifneq ($(ALL_APPS_DIRS),) +apps-ct: + $(verbose) for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app ct IS_APP=1; done +endif + +ifndef t +CT_EXTRA = +else +ifeq (,$(findstring :,$t)) +CT_EXTRA = -group $t +else +t_words = $(subst :, ,$t) +CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words)) +endif +endif + define ct_suite_target ct-$(1): test-build $(verbose) mkdir -p $(CURDIR)/logs/ - $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(1)) $(CT_OPTS) + $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS) endef $(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test)))) @@ -5953,9 +6025,8 @@ DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt export DIALYZER_PLT PLT_APPS ?= -DIALYZER_DIRS ?= --src -r src -DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions \ - -Wunmatched_returns # -Wunderspecs +DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS) +DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs # Core targets. @@ -5971,6 +6042,18 @@ help:: # Plugin-specific targets. 
+define filter_opts.erl + Opts = binary:split(<<"$1">>, <<"-">>, [global]), + Filtered = lists:reverse(lists:foldl(fun + (O = <<"pa ", _/bits>>, Acc) -> [O|Acc]; + (O = <<"D ", _/bits>>, Acc) -> [O|Acc]; + (O = <<"I ", _/bits>>, Acc) -> [O|Acc]; + (_, Acc) -> Acc + end, [], Opts)), + io:format("~s~n", [[["-", O] || O <- Filtered]]), + halt(). +endef + $(DIALYZER_PLT): deps app $(verbose) dialyzer --build_plt --apps erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS) @@ -5984,47 +6067,32 @@ dialyze: else dialyze: $(DIALYZER_PLT) endif - $(verbose) dialyzer --no_native $(DIALYZER_DIRS) $(DIALYZER_OPTS) + $(verbose) dialyzer --no_native `$(call erlang,$(call filter_opts.erl,$(ERLC_OPTS)))` $(DIALYZER_DIRS) $(DIALYZER_OPTS) -# Copyright (c) 2015, Erlang Solutions Ltd. +# Copyright (c) 2013-2015, Loïc Hoguin # This file is part of erlang.mk and subject to the terms of the ISC License. -.PHONY: elvis distclean-elvis +.PHONY: distclean-edoc edoc # Configuration. -ELVIS_CONFIG ?= $(CURDIR)/elvis.config - -ELVIS ?= $(CURDIR)/elvis -export ELVIS - -ELVIS_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis -ELVIS_CONFIG_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis.config -ELVIS_OPTS ?= +EDOC_OPTS ?= # Core targets. -help:: - $(verbose) printf "%s\n" "" \ - "Elvis targets:" \ - " elvis Run Elvis using the local elvis.config or download the default otherwise" +ifneq ($(wildcard doc/overview.edoc),) +docs:: edoc +endif -distclean:: distclean-elvis +distclean:: distclean-edoc # Plugin-specific targets. -$(ELVIS): - $(gen_verbose) $(call core_http_get,$(ELVIS),$(ELVIS_URL)) - $(verbose) chmod +x $(ELVIS) - -$(ELVIS_CONFIG): - $(verbose) $(call core_http_get,$(ELVIS_CONFIG),$(ELVIS_CONFIG_URL)) - -elvis: $(ELVIS) $(ELVIS_CONFIG) - $(verbose) $(ELVIS) rock -c $(ELVIS_CONFIG) $(ELVIS_OPTS) +edoc: distclean-edoc doc-deps + $(gen_verbose) $(ERL) -eval 'edoc:application($(PROJECT), ".", [$(EDOC_OPTS)]), halt().' 
-distclean-elvis: - $(gen_verbose) rm -rf $(ELVIS) +distclean-edoc: + $(gen_verbose) rm -f doc/*.css doc/*.html doc/*.png doc/edoc-info # Copyright (c) 2014 Dave Cottlehuber # This file is part of erlang.mk and subject to the terms of the ISC License. @@ -6095,11 +6163,12 @@ distclean-escript: # Copyright (c) 2015, Loïc Hoguin # This file is contributed to erlang.mk and subject to the terms of the ISC License. -.PHONY: eunit +.PHONY: eunit apps-eunit # Configuration EUNIT_OPTS ?= +EUNIT_ERL_OPTS ?= # Core targets. @@ -6121,7 +6190,7 @@ define eunit.erl _ -> ok end end, - case eunit:test([$(call comma_list,$(1))], [$(EUNIT_OPTS)]) of + case eunit:test($1, [$(EUNIT_OPTS)]) of ok -> ok; error -> halt(2) end, @@ -6133,14 +6202,30 @@ define eunit.erl halt() endef +EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(DEPS_DIR)/*/ebin $(APPS_DIR)/*/ebin ebin + +ifdef t +ifeq (,$(findstring :,$(t))) +eunit: test-build + $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS)) +else +eunit: test-build + $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS)) +endif +else EUNIT_EBIN_MODS = $(notdir $(basename $(call core_find,ebin/,*.beam))) EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.beam))) EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \ - $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),{module,'$(mod)'}) + $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)') -eunit: test-build - $(gen_verbose) $(ERL) -pa $(TEST_DIR) $(DEPS_DIR)/*/ebin ebin \ - -eval "$(subst $(newline),,$(subst ",\",$(call eunit.erl,$(EUNIT_MODS))))" +eunit: test-build $(if $(IS_APP),,apps-eunit) + $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS)) + +ifneq ($(ALL_APPS_DIRS),) +apps-eunit: + $(verbose) for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; done +endif +endif # Copyright (c) 2013-2015, Loïc Hoguin # This file is part of erlang.mk and 
subject to the terms of the ISC License. From ff951ac32deebd7e5f867459a12a9e3799097c74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Thu, 7 Apr 2016 18:11:36 +0200 Subject: [PATCH 115/174] Switch testsuite to common_test, part #2 The migrated tests are those from `rabbit_tests.erl`. References #725. [#116526487] --- test/dummy_event_receiver.erl | 58 + test/dummy_runtime_parameters.erl | 72 + test/rabbit_ct_broker_helpers.erl | 78 - test/sup_delayed_restart_SUITE.erl | 91 + test/unit_SUITE.erl | 525 +++- test/unit_inbroker_SUITE.erl | 3936 ++++++++++++++++++++++++++-- 6 files changed, 4464 insertions(+), 296 deletions(-) create mode 100644 test/dummy_event_receiver.erl create mode 100644 test/dummy_runtime_parameters.erl delete mode 100644 test/rabbit_ct_broker_helpers.erl create mode 100644 test/sup_delayed_restart_SUITE.erl diff --git a/test/dummy_event_receiver.erl b/test/dummy_event_receiver.erl new file mode 100644 index 000000000000..75db3678ced1 --- /dev/null +++ b/test/dummy_event_receiver.erl @@ -0,0 +1,58 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. +%% + +-module(dummy_event_receiver). + +-export([start/3, stop/0]). + +-export([init/1, handle_call/2, handle_event/2, handle_info/2, + terminate/2, code_change/3]). + +-include("rabbit.hrl"). 
+ +start(Pid, Nodes, Types) -> + Oks = [ok || _ <- Nodes], + {Oks, _} = rpc:multicall(Nodes, gen_event, add_handler, + [rabbit_event, ?MODULE, [Pid, Types]]). + +stop() -> + gen_event:delete_handler(rabbit_event, ?MODULE, []). + +%%---------------------------------------------------------------------------- + +init([Pid, Types]) -> + {ok, {Pid, Types}}. + +handle_call(_Request, State) -> + {ok, not_understood, State}. + +handle_event(Event = #event{type = Type}, State = {Pid, Types}) -> + case lists:member(Type, Types) of + true -> Pid ! Event; + false -> ok + end, + {ok, State}. + +handle_info(_Info, State) -> + {ok, State}. + +terminate(_Arg, _State) -> + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +%%---------------------------------------------------------------------------- diff --git a/test/dummy_runtime_parameters.erl b/test/dummy_runtime_parameters.erl new file mode 100644 index 000000000000..d80ec785d0d6 --- /dev/null +++ b/test/dummy_runtime_parameters.erl @@ -0,0 +1,72 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. +%% + +-module(dummy_runtime_parameters). +-behaviour(rabbit_runtime_parameter). +-behaviour(rabbit_policy_validator). + +-include("rabbit.hrl"). + +-export([validate/5, notify/4, notify_clear/3]). +-export([register/0, unregister/0]). +-export([validate_policy/1]). 
+-export([register_policy_validator/0, unregister_policy_validator/0]). + +%---------------------------------------------------------------------------- + +register() -> + rabbit_registry:register(runtime_parameter, <<"test">>, ?MODULE). + +unregister() -> + rabbit_registry:unregister(runtime_parameter, <<"test">>). + +validate(_, <<"test">>, <<"good">>, _Term, _User) -> ok; +validate(_, <<"test">>, <<"maybe">>, <<"good">>, _User) -> ok; +validate(_, <<"test">>, <<"admin">>, _Term, none) -> ok; +validate(_, <<"test">>, <<"admin">>, _Term, User) -> + case lists:member(administrator, User#user.tags) of + true -> ok; + false -> {error, "meh", []} + end; +validate(_, <<"test">>, _, _, _) -> {error, "meh", []}. + +notify(_, _, _, _) -> ok. +notify_clear(_, _, _) -> ok. + +%---------------------------------------------------------------------------- + +register_policy_validator() -> + rabbit_registry:register(policy_validator, <<"testeven">>, ?MODULE), + rabbit_registry:register(policy_validator, <<"testpos">>, ?MODULE). + +unregister_policy_validator() -> + rabbit_registry:unregister(policy_validator, <<"testeven">>), + rabbit_registry:unregister(policy_validator, <<"testpos">>). + +validate_policy([{<<"testeven">>, Terms}]) when is_list(Terms) -> + case length(Terms) rem 2 =:= 0 of + true -> ok; + false -> {error, "meh", []} + end; + +validate_policy([{<<"testpos">>, Terms}]) when is_list(Terms) -> + case lists:all(fun (N) -> is_integer(N) andalso N > 0 end, Terms) of + true -> ok; + false -> {error, "meh", []} + end; + +validate_policy(_) -> + {error, "meh", []}. diff --git a/test/rabbit_ct_broker_helpers.erl b/test/rabbit_ct_broker_helpers.erl deleted file mode 100644 index 52e3b67dd14d..000000000000 --- a/test/rabbit_ct_broker_helpers.erl +++ /dev/null @@ -1,78 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. 
You may obtain a copy of the License -%% at http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See -%% the License for the specific language governing rights and -%% limitations under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developer of the Original Code is GoPivotal, Inc. -%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. -%% - --module(rabbit_ct_broker_helpers). - --include_lib("rabbit_common/include/rabbit.hrl"). - --export([ - run_on_broker/4, - find_listener/0, - test_channel/0 - ]). - -run_on_broker(Node, Module, Function, Args) -> - %% We add some directories to the broker node search path. - Path1 = filename:dirname(code:which(Module)), - Path2 = filename:dirname(code:which(?MODULE)), - Paths = lists:usort([Path1, Path2]), - ExistingPaths = rpc:call(Node, code, get_path, []), - lists:foreach( - fun(P) -> - case lists:member(P, ExistingPaths) of - true -> ok; - false -> true = rpc:call(Node, code, add_pathz, [P]) - end - end, Paths), - %% If there is an exception, rpc:call/4 returns the exception as - %% a "normal" return value. If there is an exit signal, we raise - %% it again. In both cases, we have no idea of the module and line - %% number which triggered the issue. - case rpc:call(Node, Module, Function, Args) of - {badrpc, {'EXIT', Reason}} -> exit(Reason); - {badrpc, Reason} -> exit(Reason); - Ret -> Ret - end. - -find_listener() -> - [#listener{host = H, port = P} | _] = - [L || L = #listener{node = N, protocol = amqp} - <- rabbit_networking:active_listeners(), - N =:= node()], - {H, P}. - -user(Username) -> - #user{username = Username, - tags = [administrator], - authz_backends = [{rabbit_auth_backend_internal, none}]}. 
- -test_channel() -> - Me = self(), - Writer = spawn(fun () -> test_writer(Me) end), - {ok, Limiter} = rabbit_limiter:start_link(no_id), - {ok, Ch} = rabbit_channel:start_link( - 1, Me, Writer, Me, "", rabbit_framing_amqp_0_9_1, - user(<<"guest">>), <<"/">>, [], Me, Limiter), - {Writer, Limiter, Ch}. - -test_writer(Pid) -> - receive - {'$gen_call', From, flush} -> gen_server:reply(From, ok), - test_writer(Pid); - {send_command, Method} -> Pid ! Method, - test_writer(Pid); - shutdown -> ok - end. diff --git a/test/sup_delayed_restart_SUITE.erl b/test/sup_delayed_restart_SUITE.erl new file mode 100644 index 000000000000..e495f57d0e8e --- /dev/null +++ b/test/sup_delayed_restart_SUITE.erl @@ -0,0 +1,91 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. +%% + +-module(sup_delayed_restart_SUITE). + +-behaviour(supervisor2). + +-include_lib("common_test/include/ct.hrl"). + +-compile(export_all). + +all() -> + [ + delayed_restart + ]. 
+ +%%---------------------------------------------------------------------------- +%% Public API +%%---------------------------------------------------------------------------- + +delayed_restart(_Config) -> + passed = with_sup(simple_one_for_one, + fun (SupPid) -> + {ok, _ChildPid} = + supervisor2:start_child(SupPid, []), + test_supervisor_delayed_restart(SupPid) + end), + passed = with_sup(one_for_one, fun test_supervisor_delayed_restart/1). + +test_supervisor_delayed_restart(SupPid) -> + ok = ping_child(SupPid), + ok = exit_child(SupPid), + timer:sleep(100), + ok = ping_child(SupPid), + ok = exit_child(SupPid), + timer:sleep(100), + timeout = ping_child(SupPid), + timer:sleep(1010), + ok = ping_child(SupPid), + passed. + +with_sup(RestartStrategy, Fun) -> + {ok, SupPid} = supervisor2:start_link(?MODULE, [RestartStrategy]), + Res = Fun(SupPid), + unlink(SupPid), + exit(SupPid, shutdown), + Res. + +init([RestartStrategy]) -> + {ok, {{RestartStrategy, 1, 1}, + [{test, {?MODULE, start_child, []}, {permanent, 1}, + 16#ffffffff, worker, [?MODULE]}]}}. + +start_child() -> + {ok, proc_lib:spawn_link(fun run_child/0)}. + +ping_child(SupPid) -> + Ref = make_ref(), + with_child_pid(SupPid, fun(ChildPid) -> ChildPid ! {ping, Ref, self()} end), + receive {pong, Ref} -> ok + after 1000 -> timeout + end. + +exit_child(SupPid) -> + with_child_pid(SupPid, fun(ChildPid) -> exit(ChildPid, abnormal) end), + ok. + +with_child_pid(SupPid, Fun) -> + case supervisor2:which_children(SupPid) of + [{_Id, undefined, worker, [?MODULE]}] -> ok; + [{_Id, ChildPid, worker, [?MODULE]}] -> Fun(ChildPid); + [] -> ok + end. + +run_child() -> + receive {ping, Ref, Pid} -> Pid ! {pong, Ref}, + run_child() + end. diff --git a/test/unit_SUITE.erl b/test/unit_SUITE.erl index eddcba5803a4..ba0f43f11e34 100644 --- a/test/unit_SUITE.erl +++ b/test/unit_SUITE.erl @@ -17,6 +17,8 @@ -module(unit_SUITE). -include_lib("common_test/include/ct.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). 
+-include_lib("rabbit_common/include/rabbit_framing.hrl"). -compile(export_all). @@ -28,6 +30,21 @@ all() -> groups() -> [ {parallel_tests, [parallel], [ + arguments_parser, + filtering_flags_parsing, + {basic_header_handling, [parallel], [ + write_table_with_invalid_existing_type, + invalid_existing_headers, + disparate_invalid_header_entries_accumulate_separately, + corrupt_or_invalid_headers_are_overwritten, + invalid_same_header_entry_accumulation + ]}, + content_framing, + content_transcoding, + pg_local, + pmerge, + plmerge, + priority_queue, {resource_monitor, [parallel], [ parse_information_unit ]}, @@ -35,11 +52,14 @@ groups() -> check_shutdown_stop, check_shutdown_ignored ]}, + table_codec, {truncate, [parallel], [ short_examples_exactly, term_limit, large_examples_for_size ]}, + unfold, + version_equivalance, {vm_memory_monitor, [parallel], [ parse_line_linux ]} @@ -49,8 +69,348 @@ groups() -> init_per_group(_, Config) -> Config. end_per_group(_, Config) -> Config. +%% ------------------------------------------------------------------- +%% Argument parsing. 
+%% ------------------------------------------------------------------- + +arguments_parser(_Config) -> + GlobalOpts1 = [{"-f1", flag}, {"-o1", {option, "foo"}}], + Commands1 = [command1, {command2, [{"-f2", flag}, {"-o2", {option, "bar"}}]}], + + GetOptions = + fun (Args) -> + rabbit_cli:parse_arguments(Commands1, GlobalOpts1, "-n", Args) + end, + + check_parse_arguments(no_command, GetOptions, []), + check_parse_arguments(no_command, GetOptions, ["foo", "bar"]), + check_parse_arguments( + {ok, {command1, [{"-f1", false}, {"-o1", "foo"}], []}}, + GetOptions, ["command1"]), + check_parse_arguments( + {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], []}}, + GetOptions, ["command1", "-o1", "blah"]), + check_parse_arguments( + {ok, {command1, [{"-f1", true}, {"-o1", "foo"}], []}}, + GetOptions, ["command1", "-f1"]), + check_parse_arguments( + {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], []}}, + GetOptions, ["-o1", "blah", "command1"]), + check_parse_arguments( + {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], ["quux"]}}, + GetOptions, ["-o1", "blah", "command1", "quux"]), + check_parse_arguments( + {ok, {command1, [{"-f1", true}, {"-o1", "blah"}], ["quux", "baz"]}}, + GetOptions, ["command1", "quux", "-f1", "-o1", "blah", "baz"]), + %% For duplicate flags, the last one counts + check_parse_arguments( + {ok, {command1, [{"-f1", false}, {"-o1", "second"}], []}}, + GetOptions, ["-o1", "first", "command1", "-o1", "second"]), + %% If the flag "eats" the command, the command won't be recognised + check_parse_arguments(no_command, GetOptions, + ["-o1", "command1", "quux"]), + %% If a flag eats another flag, the eaten flag won't be recognised + check_parse_arguments( + {ok, {command1, [{"-f1", false}, {"-o1", "-f1"}], []}}, + GetOptions, ["command1", "-o1", "-f1"]), + + %% Now for some command-specific flags... 
+ check_parse_arguments( + {ok, {command2, [{"-f1", false}, {"-f2", false}, + {"-o1", "foo"}, {"-o2", "bar"}], []}}, + GetOptions, ["command2"]), + + check_parse_arguments( + {ok, {command2, [{"-f1", false}, {"-f2", true}, + {"-o1", "baz"}, {"-o2", "bar"}], ["quux", "foo"]}}, + GetOptions, ["-f2", "command2", "quux", "-o1", "baz", "foo"]), + + passed. + +check_parse_arguments(ExpRes, Fun, As) -> + SortRes = + fun (no_command) -> no_command; + ({ok, {C, KVs, As1}}) -> {ok, {C, lists:sort(KVs), As1}} + end, + + true = SortRes(ExpRes) =:= SortRes(Fun(As)). + +filtering_flags_parsing(_Config) -> + Cases = [{[], [], []} + ,{[{"--online", true}], ["--offline", "--online", "--third-option"], [false, true, false]} + ,{[{"--online", true}, {"--third-option", true}, {"--offline", true}], ["--offline", "--online", "--third-option"], [true, true, true]} + ,{[], ["--offline", "--online", "--third-option"], [true, true, true]} + ], + lists:foreach(fun({Vals, Opts, Expect}) -> + case rabbit_cli:filter_opts(Vals, Opts) of + Expect -> + ok; + Got -> + exit({no_match, Got, Expect, {args, Vals, Opts}}) + end + end, + Cases). + +%% ------------------------------------------------------------------- +%% basic_header_handling. +%% ------------------------------------------------------------------- + +-define(XDEATH_TABLE, + [{<<"reason">>, longstr, <<"blah">>}, + {<<"queue">>, longstr, <<"foo.bar.baz">>}, + {<<"exchange">>, longstr, <<"my-exchange">>}, + {<<"routing-keys">>, array, []}]). + +-define(ROUTE_TABLE, [{<<"redelivered">>, bool, <<"true">>}]). + +-define(BAD_HEADER(K), {<>, longstr, <<"bad ", K>>}). +-define(BAD_HEADER2(K, Suf), {<>, longstr, <<"bad ", K, Suf>>}). +-define(FOUND_BAD_HEADER(K), {<>, array, [{longstr, <<"bad ", K>>}]}). + +write_table_with_invalid_existing_type(_Config) -> + prepend_check(<<"header1">>, ?XDEATH_TABLE, [?BAD_HEADER("header1")]). 
+ +invalid_existing_headers(_Config) -> + Headers = + prepend_check(<<"header2">>, ?ROUTE_TABLE, [?BAD_HEADER("header2")]), + {array, [{table, ?ROUTE_TABLE}]} = + rabbit_misc:table_lookup(Headers, <<"header2">>), + passed. + +disparate_invalid_header_entries_accumulate_separately(_Config) -> + BadHeaders = [?BAD_HEADER("header2")], + Headers = prepend_check(<<"header2">>, ?ROUTE_TABLE, BadHeaders), + Headers2 = prepend_check(<<"header1">>, ?XDEATH_TABLE, + [?BAD_HEADER("header1") | Headers]), + {table, [?FOUND_BAD_HEADER("header1"), + ?FOUND_BAD_HEADER("header2")]} = + rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY), + passed. + +corrupt_or_invalid_headers_are_overwritten(_Config) -> + Headers0 = [?BAD_HEADER("header1"), + ?BAD_HEADER("x-invalid-headers")], + Headers1 = prepend_check(<<"header1">>, ?XDEATH_TABLE, Headers0), + {table,[?FOUND_BAD_HEADER("header1"), + ?FOUND_BAD_HEADER("x-invalid-headers")]} = + rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY), + passed. + +invalid_same_header_entry_accumulation(_Config) -> + BadHeader1 = ?BAD_HEADER2("header1", "a"), + Headers = prepend_check(<<"header1">>, ?ROUTE_TABLE, [BadHeader1]), + Headers2 = prepend_check(<<"header1">>, ?ROUTE_TABLE, + [?BAD_HEADER2("header1", "b") | Headers]), + {table, InvalidHeaders} = + rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY), + {array, [{longstr,<<"bad header1b">>}, + {longstr,<<"bad header1a">>}]} = + rabbit_misc:table_lookup(InvalidHeaders, <<"header1">>), + passed. + +prepend_check(HeaderKey, HeaderTable, Headers) -> + Headers1 = rabbit_basic:prepend_table_header( + HeaderKey, HeaderTable, Headers), + {table, Invalid} = + rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY), + {Type, Value} = rabbit_misc:table_lookup(Headers, HeaderKey), + {array, [{Type, Value} | _]} = + rabbit_misc:table_lookup(Invalid, HeaderKey), + Headers1. + +%% ------------------------------------------------------------------- +%% pg_local. 
+%% ------------------------------------------------------------------- + +pg_local(_Config) -> + [P, Q] = [spawn(fun () -> receive X -> X end end) || _ <- [x, x]], + check_pg_local(ok, [], []), + check_pg_local(pg_local:join(a, P), [P], []), + check_pg_local(pg_local:join(b, P), [P], [P]), + check_pg_local(pg_local:join(a, P), [P, P], [P]), + check_pg_local(pg_local:join(a, Q), [P, P, Q], [P]), + check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q]), + check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q, Q]), + check_pg_local(pg_local:leave(a, P), [P, Q], [P, Q, Q]), + check_pg_local(pg_local:leave(b, P), [P, Q], [Q, Q]), + check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]), + check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]), + [begin X ! done, + Ref = erlang:monitor(process, X), + receive {'DOWN', Ref, process, X, _Info} -> ok end + end || X <- [P, Q]], + check_pg_local(ok, [], []), + passed. + +check_pg_local(ok, APids, BPids) -> + ok = pg_local:sync(), + [true, true] = [lists:sort(Pids) == lists:sort(pg_local:get_members(Key)) || + {Key, Pids} <- [{a, APids}, {b, BPids}]]. + +%% ------------------------------------------------------------------- +%% priority_queue. 
+%% ------------------------------------------------------------------- + +priority_queue(_Config) -> + + false = priority_queue:is_queue(not_a_queue), + + %% empty Q + Q = priority_queue:new(), + {true, true, 0, [], []} = test_priority_queue(Q), + + %% 1-4 element no-priority Q + true = lists:all(fun (X) -> X =:= passed end, + lists:map(fun test_simple_n_element_queue/1, + lists:seq(1, 4))), + + %% 1-element priority Q + Q1 = priority_queue:in(foo, 1, priority_queue:new()), + {true, false, 1, [{1, foo}], [foo]} = + test_priority_queue(Q1), + + %% 2-element same-priority Q + Q2 = priority_queue:in(bar, 1, Q1), + {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} = + test_priority_queue(Q2), + + %% 2-element different-priority Q + Q3 = priority_queue:in(bar, 2, Q1), + {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} = + test_priority_queue(Q3), + + %% 1-element negative priority Q + Q4 = priority_queue:in(foo, -1, priority_queue:new()), + {true, false, 1, [{-1, foo}], [foo]} = test_priority_queue(Q4), + + %% merge 2 * 1-element no-priority Qs + Q5 = priority_queue:join(priority_queue:in(foo, Q), + priority_queue:in(bar, Q)), + {true, false, 2, [{0, foo}, {0, bar}], [foo, bar]} = + test_priority_queue(Q5), + + %% merge 1-element no-priority Q with 1-element priority Q + Q6 = priority_queue:join(priority_queue:in(foo, Q), + priority_queue:in(bar, 1, Q)), + {true, false, 2, [{1, bar}, {0, foo}], [bar, foo]} = + test_priority_queue(Q6), + + %% merge 1-element priority Q with 1-element no-priority Q + Q7 = priority_queue:join(priority_queue:in(foo, 1, Q), + priority_queue:in(bar, Q)), + {true, false, 2, [{1, foo}, {0, bar}], [foo, bar]} = + test_priority_queue(Q7), + + %% merge 2 * 1-element same-priority Qs + Q8 = priority_queue:join(priority_queue:in(foo, 1, Q), + priority_queue:in(bar, 1, Q)), + {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} = + test_priority_queue(Q8), + + %% merge 2 * 1-element different-priority Qs + Q9 = 
priority_queue:join(priority_queue:in(foo, 1, Q), + priority_queue:in(bar, 2, Q)), + {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} = + test_priority_queue(Q9), + + %% merge 2 * 1-element different-priority Qs (other way around) + Q10 = priority_queue:join(priority_queue:in(bar, 2, Q), + priority_queue:in(foo, 1, Q)), + {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} = + test_priority_queue(Q10), + + %% merge 2 * 2-element multi-different-priority Qs + Q11 = priority_queue:join(Q6, Q5), + {true, false, 4, [{1, bar}, {0, foo}, {0, foo}, {0, bar}], + [bar, foo, foo, bar]} = test_priority_queue(Q11), + + %% and the other way around + Q12 = priority_queue:join(Q5, Q6), + {true, false, 4, [{1, bar}, {0, foo}, {0, bar}, {0, foo}], + [bar, foo, bar, foo]} = test_priority_queue(Q12), + + %% merge with negative priorities + Q13 = priority_queue:join(Q4, Q5), + {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} = + test_priority_queue(Q13), + + %% and the other way around + Q14 = priority_queue:join(Q5, Q4), + {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} = + test_priority_queue(Q14), + + %% joins with empty queues: + Q1 = priority_queue:join(Q, Q1), + Q1 = priority_queue:join(Q1, Q), + + %% insert with priority into non-empty zero-priority queue + Q15 = priority_queue:in(baz, 1, Q5), + {true, false, 3, [{1, baz}, {0, foo}, {0, bar}], [baz, foo, bar]} = + test_priority_queue(Q15), + + %% 1-element infinity priority Q + Q16 = priority_queue:in(foo, infinity, Q), + {true, false, 1, [{infinity, foo}], [foo]} = test_priority_queue(Q16), + + %% add infinity to 0-priority Q + Q17 = priority_queue:in(foo, infinity, priority_queue:in(bar, Q)), + {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} = + test_priority_queue(Q17), + + %% and the other way around + Q18 = priority_queue:in(bar, priority_queue:in(foo, infinity, Q)), + {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} = + test_priority_queue(Q18), + + %% add infinity 
to mixed-priority Q + Q19 = priority_queue:in(qux, infinity, Q3), + {true, false, 3, [{infinity, qux}, {2, bar}, {1, foo}], [qux, bar, foo]} = + test_priority_queue(Q19), + + %% merge the above with a negative priority Q + Q20 = priority_queue:join(Q19, Q4), + {true, false, 4, [{infinity, qux}, {2, bar}, {1, foo}, {-1, foo}], + [qux, bar, foo, foo]} = test_priority_queue(Q20), + + %% merge two infinity priority queues + Q21 = priority_queue:join(priority_queue:in(foo, infinity, Q), + priority_queue:in(bar, infinity, Q)), + {true, false, 2, [{infinity, foo}, {infinity, bar}], [foo, bar]} = + test_priority_queue(Q21), + + %% merge two mixed priority with infinity queues + Q22 = priority_queue:join(Q18, Q20), + {true, false, 6, [{infinity, foo}, {infinity, qux}, {2, bar}, {1, foo}, + {0, bar}, {-1, foo}], [foo, qux, bar, foo, bar, foo]} = + test_priority_queue(Q22), + + passed. + +priority_queue_in_all(Q, L) -> + lists:foldl(fun (X, Acc) -> priority_queue:in(X, Acc) end, Q, L). + +priority_queue_out_all(Q) -> + case priority_queue:out(Q) of + {empty, _} -> []; + {{value, V}, Q1} -> [V | priority_queue_out_all(Q1)] + end. + +test_priority_queue(Q) -> + {priority_queue:is_queue(Q), + priority_queue:is_empty(Q), + priority_queue:len(Q), + priority_queue:to_list(Q), + priority_queue_out_all(Q)}. + +test_simple_n_element_queue(N) -> + Items = lists:seq(1, N), + Q = priority_queue_in_all(priority_queue:new(), Items), + ToListRes = [{0, X} || X <- Items], + {true, false, N, ToListRes, Items} = test_priority_queue(Q), + passed. + %% --------------------------------------------------------------------------- -%% rabbit_resource_monitor. +%% resource_monitor. %% --------------------------------------------------------------------------- parse_information_unit(_Config) -> @@ -210,3 +570,166 @@ parse_line_linux(_Config) -> {"MemTotal 502968 kB", {'MemTotal', 515039232}}, {"MemTotal 50296866 ", {'MemTotal', 50296866}}]), ok. 
+ +%% --------------------------------------------------------------------------- +%% Unordered tests (originally from rabbit_tests.erl). +%% --------------------------------------------------------------------------- + +%% Test that content frames don't exceed frame-max +content_framing(_Config) -> + %% no content + passed = test_content_framing(4096, <<>>), + %% easily fit in one frame + passed = test_content_framing(4096, <<"Easy">>), + %% exactly one frame (empty frame = 8 bytes) + passed = test_content_framing(11, <<"One">>), + %% more than one frame + passed = test_content_framing(11, <<"More than one frame">>), + passed. + +test_content_framing(FrameMax, BodyBin) -> + [Header | Frames] = + rabbit_binary_generator:build_simple_content_frames( + 1, + rabbit_binary_generator:ensure_content_encoded( + rabbit_basic:build_content(#'P_basic'{}, BodyBin), + rabbit_framing_amqp_0_9_1), + FrameMax, + rabbit_framing_amqp_0_9_1), + %% header is formatted correctly and the size is the total of the + %% fragments + <<_FrameHeader:7/binary, _ClassAndWeight:4/binary, + BodySize:64/unsigned, _Rest/binary>> = list_to_binary(Header), + BodySize = size(BodyBin), + true = lists:all( + fun (ContentFrame) -> + FrameBinary = list_to_binary(ContentFrame), + %% assert + <<_TypeAndChannel:3/binary, + Size:32/unsigned, _Payload:Size/binary, 16#CE>> = + FrameBinary, + size(FrameBinary) =< FrameMax + end, Frames), + passed. 
+ +content_transcoding(_Config) -> + %% there are no guarantees provided by 'clear' - it's just a hint + ClearDecoded = fun rabbit_binary_parser:clear_decoded_content/1, + ClearEncoded = fun rabbit_binary_generator:clear_encoded_content/1, + EnsureDecoded = + fun (C0) -> + C1 = rabbit_binary_parser:ensure_content_decoded(C0), + true = C1#content.properties =/= none, + C1 + end, + EnsureEncoded = + fun (Protocol) -> + fun (C0) -> + C1 = rabbit_binary_generator:ensure_content_encoded( + C0, Protocol), + true = C1#content.properties_bin =/= none, + C1 + end + end, + %% Beyond the assertions in Ensure*, the only testable guarantee + %% is that the operations should never fail. + %% + %% If we were using quickcheck we'd simply stuff all the above + %% into a generator for sequences of operations. In the absence of + %% quickcheck we pick particularly interesting sequences that: + %% + %% - execute every op twice since they are idempotent + %% - invoke clear_decoded, clear_encoded, decode and transcode + %% with one or both of decoded and encoded content present + [begin + sequence_with_content([Op]), + sequence_with_content([ClearEncoded, Op]), + sequence_with_content([ClearDecoded, Op]) + end || Op <- [ClearDecoded, ClearEncoded, EnsureDecoded, + EnsureEncoded(rabbit_framing_amqp_0_9_1), + EnsureEncoded(rabbit_framing_amqp_0_8)]], + passed. + +sequence_with_content(Sequence) -> + lists:foldl(fun (F, V) -> F(F(V)) end, + rabbit_binary_generator:ensure_content_encoded( + rabbit_basic:build_content(#'P_basic'{}, <<>>), + rabbit_framing_amqp_0_9_1), + Sequence). + +pmerge(_Config) -> + P = [{a, 1}, {b, 2}], + P = rabbit_misc:pmerge(a, 3, P), + [{c, 3} | P] = rabbit_misc:pmerge(c, 3, P), + passed. + +plmerge(_Config) -> + P1 = [{a, 1}, {b, 2}, {c, 3}], + P2 = [{a, 2}, {d, 4}], + [{a, 1}, {b, 2}, {c, 3}, {d, 4}] = rabbit_misc:plmerge(P1, P2), + passed. 
+ +table_codec(_Config) -> + %% FIXME this does not test inexact numbers (double and float) yet, + %% because they won't pass the equality assertions + Table = [{<<"longstr">>, longstr, <<"Here is a long string">>}, + {<<"signedint">>, signedint, 12345}, + {<<"decimal">>, decimal, {3, 123456}}, + {<<"timestamp">>, timestamp, 109876543209876}, + {<<"table">>, table, [{<<"one">>, signedint, 54321}, + {<<"two">>, longstr, + <<"A long string">>}]}, + {<<"byte">>, byte, -128}, + {<<"long">>, long, 1234567890}, + {<<"short">>, short, 655}, + {<<"bool">>, bool, true}, + {<<"binary">>, binary, <<"a binary string">>}, + {<<"unsignedbyte">>, unsignedbyte, 250}, + {<<"unsignedshort">>, unsignedshort, 65530}, + {<<"unsignedint">>, unsignedint, 4294967290}, + {<<"void">>, void, undefined}, + {<<"array">>, array, [{signedint, 54321}, + {longstr, <<"A long string">>}]} + ], + Binary = << + 7,"longstr", "S", 21:32, "Here is a long string", + 9,"signedint", "I", 12345:32/signed, + 7,"decimal", "D", 3, 123456:32, + 9,"timestamp", "T", 109876543209876:64, + 5,"table", "F", 31:32, % length of table + 3,"one", "I", 54321:32, + 3,"two", "S", 13:32, "A long string", + 4,"byte", "b", -128:8/signed, + 4,"long", "l", 1234567890:64, + 5,"short", "s", 655:16, + 4,"bool", "t", 1, + 6,"binary", "x", 15:32, "a binary string", + 12,"unsignedbyte", "B", 250:8/unsigned, + 13,"unsignedshort", "u", 65530:16/unsigned, + 11,"unsignedint", "i", 4294967290:32/unsigned, + 4,"void", "V", + 5,"array", "A", 23:32, + "I", 54321:32, + "S", 13:32, "A long string" + >>, + Binary = rabbit_binary_generator:generate_table(Table), + Table = rabbit_binary_parser:parse_table(Binary), + passed. + +unfold(_Config) -> + {[], test} = rabbit_misc:unfold(fun (_V) -> false end, test), + List = lists:seq(2,20,2), + {List, 0} = rabbit_misc:unfold(fun (0) -> false; + (N) -> {true, N*2, N-1} + end, 10), + passed. 
+ +version_equivalance(_Config) -> + true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0"), + true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.1"), + true = rabbit_misc:version_minor_equivalent("%%VSN%%", "%%VSN%%"), + false = rabbit_misc:version_minor_equivalent("3.0.0", "3.1.0"), + false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0"), + false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0.1"), + false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.foo"), + passed. diff --git a/test/unit_inbroker_SUITE.erl b/test/unit_inbroker_SUITE.erl index 4b58221727b3..dfde1fba220f 100644 --- a/test/unit_inbroker_SUITE.erl +++ b/test/unit_inbroker_SUITE.erl @@ -17,30 +17,127 @@ -module(unit_inbroker_SUITE). -include_lib("common_test/include/ct.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). +-include_lib("kernel/include/file.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). -compile(export_all). --define(TIMEOUT_LIST_OPS_PASS, 1000). +-define(PERSISTENT_MSG_STORE, msg_store_persistent). +-define(TRANSIENT_MSG_STORE, msg_store_transient). + +-define(TIMEOUT_LIST_OPS_PASS, 5000). +-define(TIMEOUT, 30000). + +-define(CLEANUP_QUEUE_NAME, <<"cleanup-queue">>). + +-define(VARIABLE_QUEUE_TESTCASES, [ + variable_queue_dynamic_duration_change, + variable_queue_partial_segments_delta_thing, + variable_queue_all_the_bits_not_covered_elsewhere_A, + variable_queue_all_the_bits_not_covered_elsewhere_B, + variable_queue_drop, + variable_queue_fold_msg_on_disk, + variable_queue_dropfetchwhile, + variable_queue_dropwhile_varying_ram_duration, + variable_queue_fetchwhile_varying_ram_duration, + variable_queue_ack_limiting, + variable_queue_purge, + variable_queue_requeue, + variable_queue_requeue_ram_beta, + variable_queue_fold, + variable_queue_batch_publish, + variable_queue_batch_publish_delivered + ]). 
+ +-define(BACKING_QUEUE_TESTCASES, [ + bq_queue_index, + bq_queue_index_props, + {variable_queue_default, [], ?VARIABLE_QUEUE_TESTCASES}, + {variable_queue_lazy, [], ?VARIABLE_QUEUE_TESTCASES ++ + [variable_queue_mode_change]}, + bq_variable_queue_delete_msg_store_files_callback, + bq_queue_recover + ]). + +-define(CLUSTER_TESTCASES, [ + delegates_async, + delegates_sync, + queue_cleanup, + declare_on_dead_queue, + refresh_events + ]). all() -> [ - {group, parallel_tests} + {group, parallel_tests}, + {group, non_parallel_tests}, + {group, backing_queue_tests}, + {group, cluster_tests}, + + {group, disconnect_detected_during_alarm}, + {group, list_consumers_sanity_check}, + {group, list_queues_online_and_offline} ]. groups() -> [ {parallel_tests, [parallel], [ - {credit_flow, [parallel], [ - credit_flow_settings - ]}, - {password_hashing, [parallel], [ + amqp_connection_refusal, + configurable_server_properties, + confirms, + credit_flow_settings, + dynamic_mirroring, + gen_server2_with_state, + list_operations_timeout_pass, + mcall, + {password_hashing, [], [ password_hashing, change_password ]}, - {rabbitmqctl, [parallel], [ - list_operations_timeout_pass - ]} + {policy_validation, [parallel, {repeat, 20}], [ + ha_policy_validation, + policy_validation, + policy_opts_validation, + queue_master_location_policy_validation, + queue_modes_policy_validation, + vhost_removed_while_updating_policy + ]}, + runtime_parameters, + set_disk_free_limit_command, + topic_matching, + user_management + ]}, + {non_parallel_tests, [], [ + app_management, %% Restart RabbitMQ. + channel_statistics, %% Expect specific statistics. + disk_monitor, %% Replace rabbit_misc module. + file_handle_cache, %% Change FHC limit. + head_message_timestamp_statistics, %% Expect specific statistics. + log_management, %% Check log files. + log_management_during_startup, %% Check log files. + memory_high_watermark, %% Trigger alarm. + rotate_logs_without_suffix, %% Check log files. 
+ server_status %% Trigger alarm. + ]}, + {backing_queue_tests, [], [ + msg_store, + {backing_queue_embed_limit_0, [], ?BACKING_QUEUE_TESTCASES}, + {backing_queue_embed_limit_1024, [], ?BACKING_QUEUE_TESTCASES} + ]}, + {cluster_tests, [], [ + {from_cluster_node1, [], ?CLUSTER_TESTCASES}, + {from_cluster_node2, [], ?CLUSTER_TESTCASES} + ]}, + + %% Test previously executed with the multi-node target. + {disconnect_detected_during_alarm, [], [ + disconnect_detected_during_alarm %% Trigger alarm. + ]}, + {list_consumers_sanity_check, [], [ + list_consumers_sanity_check + ]}, + {list_queues_online_and_offline, [], [ + list_queues_online_and_offline %% Stop node B. ]} ]. @@ -50,265 +147,3670 @@ groups() -> init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), - rabbit_ct_helpers:run_setup_steps(?MODULE, Config). + rabbit_ct_helpers:run_setup_steps(Config). end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). -init_per_group(_, Config) -> Config. -end_per_group(_, Config) -> Config. +init_per_group(Group, Config) -> + case lists:member({group, Group}, all()) of + true -> + ClusterSize = case Group of + disconnect_detected_during_alarm -> 1; + list_consumers_sanity_check -> 1; + _ -> 2 + end, + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, Group}, + {rmq_nodes_count, ClusterSize} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps() ++ [ + fun(C) -> init_per_group1(Group, C) end, + fun setup_file_handle_cache/1 + ]); + false -> + rabbit_ct_helpers:run_steps(Config, [ + fun(C) -> init_per_group1(Group, C) end + ]) + end. -%% --------------------------------------------------------------------------- -%% Credit flow. 
-%% --------------------------------------------------------------------------- +init_per_group1(backing_queue_tests, Config) -> + Module = rabbit_ct_broker_helpers:rpc(Config, 0, + application, get_env, [rabbit, backing_queue_module]), + case Module of + {ok, rabbit_priority_queue} -> + rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, setup_backing_queue_test_group, [Config]); + _ -> + {skip, rabbit_misc:format( + "Backing queue module not supported by this test group: ~p~n", + [Module])} + end; +init_per_group1(backing_queue_embed_limit_0, Config) -> + ok = rabbit_ct_broker_helpers:rpc(Config, 0, + application, set_env, [rabbit, queue_index_embed_msgs_below, 0]), + Config; +init_per_group1(backing_queue_embed_limit_1024, Config) -> + ok = rabbit_ct_broker_helpers:rpc(Config, 0, + application, set_env, [rabbit, queue_index_embed_msgs_below, 1024]), + Config; +init_per_group1(variable_queue_default, Config) -> + rabbit_ct_helpers:set_config(Config, {variable_queue_type, default}); +init_per_group1(variable_queue_lazy, Config) -> + rabbit_ct_helpers:set_config(Config, {variable_queue_type, lazy}); +init_per_group1(from_cluster_node1, Config) -> + rabbit_ct_helpers:set_config(Config, {test_direction, {0, 1}}); +init_per_group1(from_cluster_node2, Config) -> + rabbit_ct_helpers:set_config(Config, {test_direction, {1, 0}}); +init_per_group1(_, Config) -> + Config. -credit_flow_settings(Config) -> - passed = rabbit_ct_broker_helpers:run_on_broker( - ?config(rmq_nodename, Config), - ?MODULE, credit_flow_settings1, [Config]). +setup_file_handle_cache(Config) -> + ok = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, setup_file_handle_cache1, []), + Config. -credit_flow_settings1(_Config) -> - %% default values - passed = test_proc(200, 50), +setup_file_handle_cache1() -> + %% FIXME: Why are we doing this? + application:set_env(rabbit, file_handles_high_watermark, 10), + ok = file_handle_cache:set_limit(10), + ok. 
- application:set_env(rabbit, credit_flow_default_credit, {100, 20}), - passed = test_proc(100, 20), +end_per_group(Group, Config) -> + case lists:member({group, Group}, all()) of + true -> + rabbit_ct_helpers:run_steps(Config, + [fun(C) -> end_per_group1(Group, C) end] ++ + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()); + false -> + Config + end. - application:unset_env(rabbit, credit_flow_default_credit), +end_per_group1(backing_queue_tests, Config) -> + rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, teardown_backing_queue_test_group, [Config]); +end_per_group1(Group, Config) +when Group =:= backing_queue_embed_limit_0 +orelse Group =:= backing_queue_embed_limit_1024 -> + ok = rabbit_ct_broker_helpers:rpc(Config, 0, + application, set_env, [rabbit, queue_index_embed_msgs_below, + ?config(rmq_queue_index_embed_msgs_below, Config)]), + Config; +end_per_group1(_, Config) -> + Config. - % back to defaults - passed = test_proc(200, 50), +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +%% ------------------------------------------------------------------- +%% Application management. +%% ------------------------------------------------------------------- + +app_management(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, app_management1, [Config]). + +app_management1(_Config) -> + control_action(wait, [os:getenv("RABBITMQ_PID_FILE")]), + %% Starting, stopping and diagnostics. Note that we don't try + %% 'report' when the rabbit app is stopped and that we enable + %% tracing for the duration of this function. 
+ ok = control_action(trace_on, []), + ok = control_action(stop_app, []), + ok = control_action(stop_app, []), + ok = control_action(status, []), + ok = control_action(cluster_status, []), + ok = control_action(environment, []), + ok = control_action(start_app, []), + ok = control_action(start_app, []), + ok = control_action(status, []), + ok = control_action(report, []), + ok = control_action(cluster_status, []), + ok = control_action(environment, []), + ok = control_action(trace_off, []), passed. -test_proc(InitialCredit, MoreCreditAfter) -> - Pid = spawn(fun dummy/0), - Pid ! {credit, self()}, - {InitialCredit, MoreCreditAfter} = - receive - {credit, Val} -> Val - end, +%% ------------------------------------------------------------------- +%% Message store. +%% ------------------------------------------------------------------- + +msg_store(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, msg_store1, [Config]). + +msg_store1(_Config) -> + restart_msg_store_empty(), + MsgIds = [msg_id_bin(M) || M <- lists:seq(1,100)], + {MsgIds1stHalf, MsgIds2ndHalf} = lists:split(length(MsgIds) div 2, MsgIds), + Ref = rabbit_guid:gen(), + {Cap, MSCState} = msg_store_client_init_capture( + ?PERSISTENT_MSG_STORE, Ref), + Ref2 = rabbit_guid:gen(), + {Cap2, MSC2State} = msg_store_client_init_capture( + ?PERSISTENT_MSG_STORE, Ref2), + %% check we don't contain any of the msgs we're about to publish + false = msg_store_contains(false, MsgIds, MSCState), + %% test confirm logic + passed = test_msg_store_confirms([hd(MsgIds)], Cap, MSCState), + %% check we don't contain any of the msgs we're about to publish + false = msg_store_contains(false, MsgIds, MSCState), + %% publish the first half + ok = msg_store_write(MsgIds1stHalf, MSCState), + %% sync on the first half + ok = on_disk_await(Cap, MsgIds1stHalf), + %% publish the second half + ok = msg_store_write(MsgIds2ndHalf, MSCState), + %% check they're all in there + true = msg_store_contains(true, MsgIds, 
MSCState), + %% publish the latter half twice so we hit the caching and ref + %% count code. We need to do this through a 2nd client since a + %% single client is not supposed to write the same message more + %% than once without first removing it. + ok = msg_store_write(MsgIds2ndHalf, MSC2State), + %% check they're still all in there + true = msg_store_contains(true, MsgIds, MSCState), + %% sync on the 2nd half + ok = on_disk_await(Cap2, MsgIds2ndHalf), + %% cleanup + ok = on_disk_stop(Cap2), + ok = rabbit_msg_store:client_delete_and_terminate(MSC2State), + ok = on_disk_stop(Cap), + %% read them all + MSCState1 = msg_store_read(MsgIds, MSCState), + %% read them all again - this will hit the cache, not disk + MSCState2 = msg_store_read(MsgIds, MSCState1), + %% remove them all + ok = msg_store_remove(MsgIds, MSCState2), + %% check first half doesn't exist + false = msg_store_contains(false, MsgIds1stHalf, MSCState2), + %% check second half does exist + true = msg_store_contains(true, MsgIds2ndHalf, MSCState2), + %% read the second half again + MSCState3 = msg_store_read(MsgIds2ndHalf, MSCState2), + %% read the second half again, just for fun (aka code coverage) + MSCState4 = msg_store_read(MsgIds2ndHalf, MSCState3), + ok = rabbit_msg_store:client_terminate(MSCState4), + %% stop and restart, preserving every other msg in 2nd half + ok = rabbit_variable_queue:stop_msg_store(), + ok = rabbit_variable_queue:start_msg_store( + [], {fun ([]) -> finished; + ([MsgId|MsgIdsTail]) + when length(MsgIdsTail) rem 2 == 0 -> + {MsgId, 1, MsgIdsTail}; + ([MsgId|MsgIdsTail]) -> + {MsgId, 0, MsgIdsTail} + end, MsgIds2ndHalf}), + MSCState5 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref), + %% check we have the right msgs left + lists:foldl( + fun (MsgId, Bool) -> + not(Bool = rabbit_msg_store:contains(MsgId, MSCState5)) + end, false, MsgIds2ndHalf), + ok = rabbit_msg_store:client_terminate(MSCState5), + %% restart empty + restart_msg_store_empty(), + MSCState6 = 
msg_store_client_init(?PERSISTENT_MSG_STORE, Ref), + %% check we don't contain any of the msgs + false = msg_store_contains(false, MsgIds, MSCState6), + %% publish the first half again + ok = msg_store_write(MsgIds1stHalf, MSCState6), + %% this should force some sort of sync internally otherwise misread + ok = rabbit_msg_store:client_terminate( + msg_store_read(MsgIds1stHalf, MSCState6)), + MSCState7 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref), + ok = msg_store_remove(MsgIds1stHalf, MSCState7), + ok = rabbit_msg_store:client_terminate(MSCState7), + %% restart empty + restart_msg_store_empty(), %% now safe to reuse msg_ids + %% push a lot of msgs in... at least 100 files worth + {ok, FileSize} = application:get_env(rabbit, msg_store_file_size_limit), + PayloadSizeBits = 65536, + BigCount = trunc(100 * FileSize / (PayloadSizeBits div 8)), + MsgIdsBig = [msg_id_bin(X) || X <- lists:seq(1, BigCount)], + Payload = << 0:PayloadSizeBits >>, + ok = with_msg_store_client( + ?PERSISTENT_MSG_STORE, Ref, + fun (MSCStateM) -> + [ok = rabbit_msg_store:write(MsgId, Payload, MSCStateM) || + MsgId <- MsgIdsBig], + MSCStateM + end), + %% now read them to ensure we hit the fast client-side reading + ok = foreach_with_msg_store_client( + ?PERSISTENT_MSG_STORE, Ref, + fun (MsgId, MSCStateM) -> + {{ok, Payload}, MSCStateN} = rabbit_msg_store:read( + MsgId, MSCStateM), + MSCStateN + end, MsgIdsBig), + %% .., then 3s by 1... + ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref, + [msg_id_bin(X) || X <- lists:seq(BigCount, 1, -3)]), + %% .., then remove 3s by 2, from the young end first. This hits + %% GC (under 50% good data left, but no empty files. Must GC). + ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref, + [msg_id_bin(X) || X <- lists:seq(BigCount-1, 1, -3)]), + %% .., then remove 3s by 3, from the young end first. This hits + %% GC... 
+ ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref, + [msg_id_bin(X) || X <- lists:seq(BigCount-2, 1, -3)]), + %% ensure empty + ok = with_msg_store_client( + ?PERSISTENT_MSG_STORE, Ref, + fun (MSCStateM) -> + false = msg_store_contains(false, MsgIdsBig, MSCStateM), + MSCStateM + end), + %% + passed = test_msg_store_client_delete_and_terminate(), + %% restart empty + restart_msg_store_empty(), passed. -dummy() -> - credit_flow:send(self()), +restart_msg_store_empty() -> + ok = rabbit_variable_queue:stop_msg_store(), + ok = rabbit_variable_queue:start_msg_store( + undefined, {fun (ok) -> finished end, ok}). + +msg_id_bin(X) -> + erlang:md5(term_to_binary(X)). + +on_disk_capture() -> receive - {credit, From} -> - From ! {credit, get(credit_flow_default_credit)}; - _ -> - dummy() + {await, MsgIds, Pid} -> on_disk_capture([], MsgIds, Pid); + stop -> done end. -%% --------------------------------------------------------------------------- -%% Password hashing. -%% --------------------------------------------------------------------------- +on_disk_capture([_|_], _Awaiting, Pid) -> + Pid ! {self(), surplus}; +on_disk_capture(OnDisk, Awaiting, Pid) -> + receive + {on_disk, MsgIdsS} -> + MsgIds = gb_sets:to_list(MsgIdsS), + on_disk_capture(OnDisk ++ (MsgIds -- Awaiting), Awaiting -- MsgIds, + Pid); + stop -> + done + after (case Awaiting of [] -> 200; _ -> ?TIMEOUT end) -> + case Awaiting of + [] -> Pid ! {self(), arrived}, on_disk_capture(); + _ -> Pid ! {self(), timeout} + end + end. -password_hashing(Config) -> - passed = rabbit_ct_broker_helpers:run_on_broker( - ?config(rmq_nodename, Config), - ?MODULE, password_hashing1, [Config]). +on_disk_await(Pid, MsgIds) when is_list(MsgIds) -> + Pid ! {await, MsgIds, self()}, + receive + {Pid, arrived} -> ok; + {Pid, Error} -> Error + end. 
-password_hashing1(_Config) -> - rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(), - application:set_env(rabbit, password_hashing_module, - rabbit_password_hashing_md5), - rabbit_password_hashing_md5 = rabbit_password:hashing_mod(), - application:set_env(rabbit, password_hashing_module, - rabbit_password_hashing_sha256), - rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(), +on_disk_stop(Pid) -> + MRef = erlang:monitor(process, Pid), + Pid ! stop, + receive {'DOWN', MRef, process, Pid, _Reason} -> + ok + end. - rabbit_password_hashing_sha256 = - rabbit_password:hashing_mod(rabbit_password_hashing_sha256), - rabbit_password_hashing_md5 = - rabbit_password:hashing_mod(rabbit_password_hashing_md5), - rabbit_password_hashing_md5 = - rabbit_password:hashing_mod(undefined), +msg_store_client_init_capture(MsgStore, Ref) -> + Pid = spawn(fun on_disk_capture/0), + {Pid, rabbit_msg_store:client_init( + MsgStore, Ref, fun (MsgIds, _ActionTaken) -> + Pid ! {on_disk, MsgIds} + end, undefined)}. - rabbit_password_hashing_md5 = - rabbit_auth_backend_internal:hashing_module_for_user( - #internal_user{}), - rabbit_password_hashing_md5 = - rabbit_auth_backend_internal:hashing_module_for_user( - #internal_user{ - hashing_algorithm = undefined - }), - rabbit_password_hashing_md5 = - rabbit_auth_backend_internal:hashing_module_for_user( - #internal_user{ - hashing_algorithm = rabbit_password_hashing_md5 - }), +msg_store_contains(Atom, MsgIds, MSCState) -> + Atom = lists:foldl( + fun (MsgId, Atom1) when Atom1 =:= Atom -> + rabbit_msg_store:contains(MsgId, MSCState) end, + Atom, MsgIds). - rabbit_password_hashing_sha256 = - rabbit_auth_backend_internal:hashing_module_for_user( - #internal_user{ - hashing_algorithm = rabbit_password_hashing_sha256 - }), +msg_store_read(MsgIds, MSCState) -> + lists:foldl(fun (MsgId, MSCStateM) -> + {{ok, MsgId}, MSCStateN} = rabbit_msg_store:read( + MsgId, MSCStateM), + MSCStateN + end, MSCState, MsgIds). - passed. 
+msg_store_write(MsgIds, MSCState) -> + ok = lists:foldl(fun (MsgId, ok) -> + rabbit_msg_store:write(MsgId, MsgId, MSCState) + end, ok, MsgIds). -change_password(Config) -> - passed = rabbit_ct_broker_helpers:run_on_broker( - ?config(rmq_nodename, Config), - ?MODULE, change_password1, [Config]). +msg_store_write_flow(MsgIds, MSCState) -> + ok = lists:foldl(fun (MsgId, ok) -> + rabbit_msg_store:write_flow(MsgId, MsgId, MSCState) + end, ok, MsgIds). -change_password1(_Config) -> - UserName = <<"test_user">>, - Password = <<"test_password">>, - case rabbit_auth_backend_internal:lookup_user(UserName) of - {ok, _} -> rabbit_auth_backend_internal:delete_user(UserName); - _ -> ok - end, - ok = application:set_env(rabbit, password_hashing_module, - rabbit_password_hashing_md5), - ok = rabbit_auth_backend_internal:add_user(UserName, Password), - {ok, #auth_user{username = UserName}} = - rabbit_auth_backend_internal:user_login_authentication( - UserName, [{password, Password}]), - ok = application:set_env(rabbit, password_hashing_module, - rabbit_password_hashing_sha256), - {ok, #auth_user{username = UserName}} = - rabbit_auth_backend_internal:user_login_authentication( - UserName, [{password, Password}]), +msg_store_remove(MsgIds, MSCState) -> + rabbit_msg_store:remove(MsgIds, MSCState). - NewPassword = <<"test_password1">>, - ok = rabbit_auth_backend_internal:change_password(UserName, NewPassword), - {ok, #auth_user{username = UserName}} = - rabbit_auth_backend_internal:user_login_authentication( - UserName, [{password, NewPassword}]), +msg_store_remove(MsgStore, Ref, MsgIds) -> + with_msg_store_client(MsgStore, Ref, + fun (MSCStateM) -> + ok = msg_store_remove(MsgIds, MSCStateM), + MSCStateM + end). - {refused, _, [UserName]} = - rabbit_auth_backend_internal:user_login_authentication( - UserName, [{password, Password}]), +with_msg_store_client(MsgStore, Ref, Fun) -> + rabbit_msg_store:client_terminate( + Fun(msg_store_client_init(MsgStore, Ref))). 
+ +foreach_with_msg_store_client(MsgStore, Ref, Fun, L) -> + rabbit_msg_store:client_terminate( + lists:foldl(fun (MsgId, MSCState) -> Fun(MsgId, MSCState) end, + msg_store_client_init(MsgStore, Ref), L)). + +test_msg_store_confirms(MsgIds, Cap, MSCState) -> + %% write -> confirmed + ok = msg_store_write(MsgIds, MSCState), + ok = on_disk_await(Cap, MsgIds), + %% remove -> _ + ok = msg_store_remove(MsgIds, MSCState), + ok = on_disk_await(Cap, []), + %% write, remove -> confirmed + ok = msg_store_write(MsgIds, MSCState), + ok = msg_store_remove(MsgIds, MSCState), + ok = on_disk_await(Cap, MsgIds), + %% write, remove, write -> confirmed, confirmed + ok = msg_store_write(MsgIds, MSCState), + ok = msg_store_remove(MsgIds, MSCState), + ok = msg_store_write(MsgIds, MSCState), + ok = on_disk_await(Cap, MsgIds ++ MsgIds), + %% remove, write -> confirmed + ok = msg_store_remove(MsgIds, MSCState), + ok = msg_store_write(MsgIds, MSCState), + ok = on_disk_await(Cap, MsgIds), + %% remove, write, remove -> confirmed + ok = msg_store_remove(MsgIds, MSCState), + ok = msg_store_write(MsgIds, MSCState), + ok = msg_store_remove(MsgIds, MSCState), + ok = on_disk_await(Cap, MsgIds), + %% confirmation on timer-based sync + passed = test_msg_store_confirm_timer(), + passed. + +test_msg_store_confirm_timer() -> + Ref = rabbit_guid:gen(), + MsgId = msg_id_bin(1), + Self = self(), + MSCState = rabbit_msg_store:client_init( + ?PERSISTENT_MSG_STORE, Ref, + fun (MsgIds, _ActionTaken) -> + case gb_sets:is_member(MsgId, MsgIds) of + true -> Self ! on_disk; + false -> ok + end + end, undefined), + ok = msg_store_write([MsgId], MSCState), + ok = msg_store_keep_busy_until_confirm([msg_id_bin(2)], MSCState, false), + ok = msg_store_remove([MsgId], MSCState), + ok = rabbit_msg_store:client_delete_and_terminate(MSCState), + passed. 
+ +msg_store_keep_busy_until_confirm(MsgIds, MSCState, Blocked) -> + After = case Blocked of + false -> 0; + true -> ?MAX_WAIT + end, + Recurse = fun () -> msg_store_keep_busy_until_confirm( + MsgIds, MSCState, credit_flow:blocked()) end, + receive + on_disk -> ok; + {bump_credit, Msg} -> credit_flow:handle_bump_msg(Msg), + Recurse() + after After -> + ok = msg_store_write_flow(MsgIds, MSCState), + ok = msg_store_remove(MsgIds, MSCState), + Recurse() + end. + +test_msg_store_client_delete_and_terminate() -> + restart_msg_store_empty(), + MsgIds = [msg_id_bin(M) || M <- lists:seq(1, 10)], + Ref = rabbit_guid:gen(), + MSCState = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref), + ok = msg_store_write(MsgIds, MSCState), + %% test the 'dying client' fast path for writes + ok = rabbit_msg_store:client_delete_and_terminate(MSCState), passed. %% ------------------------------------------------------------------- -%% rabbitmqctl. +%% Backing queue. %% ------------------------------------------------------------------- -list_operations_timeout_pass(Config) -> - passed = rabbit_ct_broker_helpers:run_on_broker( - ?config(rmq_nodename, Config), - ?MODULE, list_operations_timeout_pass1, [Config]). +setup_backing_queue_test_group(Config) -> + {ok, FileSizeLimit} = + application:get_env(rabbit, msg_store_file_size_limit), + application:set_env(rabbit, msg_store_file_size_limit, 512), + {ok, MaxJournal} = + application:get_env(rabbit, queue_index_max_journal_entries), + application:set_env(rabbit, queue_index_max_journal_entries, 128), + application:set_env(rabbit, msg_store_file_size_limit, + FileSizeLimit), + {ok, Bytes} = + application:get_env(rabbit, queue_index_embed_msgs_below), + rabbit_ct_helpers:set_config(Config, [ + {rmq_queue_index_max_journal_entries, MaxJournal}, + {rmq_queue_index_embed_msgs_below, Bytes} + ]). 
-list_operations_timeout_pass1(_Config) -> - %% create a few things so there is some useful information to list - {_Writer1, Limiter1, Ch1} = rabbit_ct_broker_helpers:test_channel(), - {_Writer2, Limiter2, Ch2} = rabbit_ct_broker_helpers:test_channel(), +teardown_backing_queue_test_group(Config) -> + %% FIXME: Undo all the setup function did. + application:set_env(rabbit, queue_index_max_journal_entries, + ?config(rmq_queue_index_max_journal_entries, Config)), + %% We will have restarted the message store, and thus changed + %% the order of the children of rabbit_sup. This will cause + %% problems if there are subsequent failures - see bug 24262. + ok = restart_app(), + Config. - [Q, Q2] = [Queue || Name <- [<<"foo">>, <<"bar">>], - {new, Queue = #amqqueue{}} <- - [rabbit_amqqueue:declare( - rabbit_misc:r(<<"/">>, queue, Name), - false, false, [], none)]], +bq_queue_index(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, bq_queue_index1, [Config]). - ok = rabbit_amqqueue:basic_consume( - Q, true, Ch1, Limiter1, false, 0, <<"ctag1">>, true, [], - undefined), - ok = rabbit_amqqueue:basic_consume( - Q2, true, Ch2, Limiter2, false, 0, <<"ctag2">>, true, [], - undefined), +bq_queue_index1(_Config) -> + SegmentSize = rabbit_queue_index:next_segment_boundary(0), + TwoSegs = SegmentSize + SegmentSize, + MostOfASegment = trunc(SegmentSize*0.75), + SeqIdsA = lists:seq(0, MostOfASegment-1), + SeqIdsB = lists:seq(MostOfASegment, 2*MostOfASegment), + SeqIdsC = lists:seq(0, trunc(SegmentSize/2)), + SeqIdsD = lists:seq(0, SegmentSize*4), - %% list users - ok = rabbit_ct_helpers:control_action(add_user, ["foo", "bar"]), - {error, {user_already_exists, _}} = - rabbit_ct_helpers:control_action(add_user, ["foo", "bar"]), - ok = rabbit_ct_helpers:control_action_t(list_users, [], - ?TIMEOUT_LIST_OPS_PASS), + with_empty_test_queue( + fun (Qi0) -> + {0, 0, Qi1} = rabbit_queue_index:bounds(Qi0), + {Qi2, SeqIdsMsgIdsA} = queue_index_publish(SeqIdsA, false, Qi1), + 
{0, SegmentSize, Qi3} = rabbit_queue_index:bounds(Qi2), + {ReadA, Qi4} = rabbit_queue_index:read(0, SegmentSize, Qi3), + ok = verify_read_with_published(false, false, ReadA, + lists:reverse(SeqIdsMsgIdsA)), + %% should get length back as 0, as all the msgs were transient + {0, 0, Qi6} = restart_test_queue(Qi4), + {0, 0, Qi7} = rabbit_queue_index:bounds(Qi6), + {Qi8, SeqIdsMsgIdsB} = queue_index_publish(SeqIdsB, true, Qi7), + {0, TwoSegs, Qi9} = rabbit_queue_index:bounds(Qi8), + {ReadB, Qi10} = rabbit_queue_index:read(0, SegmentSize, Qi9), + ok = verify_read_with_published(false, true, ReadB, + lists:reverse(SeqIdsMsgIdsB)), + %% should get length back as MostOfASegment + LenB = length(SeqIdsB), + BytesB = LenB * 10, + {LenB, BytesB, Qi12} = restart_test_queue(Qi10), + {0, TwoSegs, Qi13} = rabbit_queue_index:bounds(Qi12), + Qi14 = rabbit_queue_index:deliver(SeqIdsB, Qi13), + {ReadC, Qi15} = rabbit_queue_index:read(0, SegmentSize, Qi14), + ok = verify_read_with_published(true, true, ReadC, + lists:reverse(SeqIdsMsgIdsB)), + Qi16 = rabbit_queue_index:ack(SeqIdsB, Qi15), + Qi17 = rabbit_queue_index:flush(Qi16), + %% Everything will have gone now because #pubs == #acks + {0, 0, Qi18} = rabbit_queue_index:bounds(Qi17), + %% should get length back as 0 because all persistent + %% msgs have been acked + {0, 0, Qi19} = restart_test_queue(Qi18), + Qi19 + end), - %% list parameters - ok = rabbit_runtime_parameters_test:register(), - ok = rabbit_ct_helpers:control_action(set_parameter, - ["test", "good", "123"]), - ok = rabbit_ct_helpers:control_action_t(list_parameters, [], - ?TIMEOUT_LIST_OPS_PASS), - ok = rabbit_ct_helpers:control_action(clear_parameter, - ["test", "good"]), - rabbit_runtime_parameters_test:unregister(), + %% These next bits are just to hit the auto deletion of segment files. 
+ %% First, partials: + %% a) partial pub+del+ack, then move to new segment + with_empty_test_queue( + fun (Qi0) -> + {Qi1, _SeqIdsMsgIdsC} = queue_index_publish(SeqIdsC, + false, Qi0), + Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1), + Qi3 = rabbit_queue_index:ack(SeqIdsC, Qi2), + Qi4 = rabbit_queue_index:flush(Qi3), + {Qi5, _SeqIdsMsgIdsC1} = queue_index_publish([SegmentSize], + false, Qi4), + Qi5 + end), - %% list vhosts - ok = rabbit_ct_helpers:control_action(add_vhost, ["/testhost"]), - {error, {vhost_already_exists, _}} = - rabbit_ct_helpers:control_action(add_vhost, ["/testhost"]), - ok = rabbit_ct_helpers:control_action_t(list_vhosts, [], - ?TIMEOUT_LIST_OPS_PASS), + %% b) partial pub+del, then move to new segment, then ack all in old segment + with_empty_test_queue( + fun (Qi0) -> + {Qi1, _SeqIdsMsgIdsC2} = queue_index_publish(SeqIdsC, + false, Qi0), + Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1), + {Qi3, _SeqIdsMsgIdsC3} = queue_index_publish([SegmentSize], + false, Qi2), + Qi4 = rabbit_queue_index:ack(SeqIdsC, Qi3), + rabbit_queue_index:flush(Qi4) + end), - %% list permissions - ok = rabbit_ct_helpers:control_action(set_permissions, - ["foo", ".*", ".*", ".*"], - [{"-p", "/testhost"}]), - ok = rabbit_ct_helpers:control_action_t(list_permissions, [], - [{"-p", "/testhost"}], - ?TIMEOUT_LIST_OPS_PASS), + %% c) just fill up several segments of all pubs, then +dels, then +acks + with_empty_test_queue( + fun (Qi0) -> + {Qi1, _SeqIdsMsgIdsD} = queue_index_publish(SeqIdsD, + false, Qi0), + Qi2 = rabbit_queue_index:deliver(SeqIdsD, Qi1), + Qi3 = rabbit_queue_index:ack(SeqIdsD, Qi2), + rabbit_queue_index:flush(Qi3) + end), - %% list user permissions - ok = rabbit_ct_helpers:control_action_t(list_user_permissions, ["foo"], - ?TIMEOUT_LIST_OPS_PASS), + %% d) get messages in all states to a segment, then flush, then do + %% the same again, don't flush and read. This will hit all + %% possibilities in combining the segment with the journal. 
+ with_empty_test_queue( + fun (Qi0) -> + {Qi1, [Seven,Five,Four|_]} = queue_index_publish([0,1,2,4,5,7], + false, Qi0), + Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1), + Qi3 = rabbit_queue_index:ack([0], Qi2), + Qi4 = rabbit_queue_index:flush(Qi3), + {Qi5, [Eight,Six|_]} = queue_index_publish([3,6,8], false, Qi4), + Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5), + Qi7 = rabbit_queue_index:ack([1,2,3], Qi6), + {[], Qi8} = rabbit_queue_index:read(0, 4, Qi7), + {ReadD, Qi9} = rabbit_queue_index:read(4, 7, Qi8), + ok = verify_read_with_published(true, false, ReadD, + [Four, Five, Six]), + {ReadE, Qi10} = rabbit_queue_index:read(7, 9, Qi9), + ok = verify_read_with_published(false, false, ReadE, + [Seven, Eight]), + Qi10 + end), - %% list policies - ok = rabbit_ct_helpers:control_action_opts(["set_policy", "name", ".*", - "{\"ha-mode\":\"all\"}"]), - ok = rabbit_ct_helpers:control_action_t(list_policies, [], - ?TIMEOUT_LIST_OPS_PASS), - ok = rabbit_ct_helpers:control_action(clear_policy, ["name"]), + %% e) as for (d), but use terminate instead of read, which will + %% exercise journal_minus_segment, not segment_plus_journal. + with_empty_test_queue( + fun (Qi0) -> + {Qi1, _SeqIdsMsgIdsE} = queue_index_publish([0,1,2,4,5,7], + true, Qi0), + Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1), + Qi3 = rabbit_queue_index:ack([0], Qi2), + {5, 50, Qi4} = restart_test_queue(Qi3), + {Qi5, _SeqIdsMsgIdsF} = queue_index_publish([3,6,8], true, Qi4), + Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5), + Qi7 = rabbit_queue_index:ack([1,2,3], Qi6), + {5, 50, Qi8} = restart_test_queue(Qi7), + Qi8 + end), - %% list queues - ok = rabbit_ct_helpers:info_action_t(list_queues, - rabbit_amqqueue:info_keys(), false, - ?TIMEOUT_LIST_OPS_PASS), + ok = rabbit_variable_queue:stop(), + {ok, _} = rabbit_variable_queue:start([]), - %% list exchanges - ok = rabbit_ct_helpers:info_action_t(list_exchanges, - rabbit_exchange:info_keys(), true, - ?TIMEOUT_LIST_OPS_PASS), + passed. 
- %% list bindings - ok = rabbit_ct_helpers:info_action_t(list_bindings, - rabbit_binding:info_keys(), true, - ?TIMEOUT_LIST_OPS_PASS), +bq_queue_index_props(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, bq_queue_index_props1, [Config]). - %% list connections - {H, P} = rabbit_ct_broker_helpers:find_listener(), - {ok, C1} = gen_tcp:connect(H, P, [binary, {active, false}]), - gen_tcp:send(C1, <<"AMQP", 0, 0, 9, 1>>), - {ok, <<1,0,0>>} = gen_tcp:recv(C1, 3, 100), +bq_queue_index_props1(_Config) -> + with_empty_test_queue( + fun(Qi0) -> + MsgId = rabbit_guid:gen(), + Props = #message_properties{expiry=12345, size = 10}, + Qi1 = rabbit_queue_index:publish( + MsgId, 1, Props, true, infinity, Qi0), + {[{MsgId, 1, Props, _, _}], Qi2} = + rabbit_queue_index:read(1, 2, Qi1), + Qi2 + end), - {ok, C2} = gen_tcp:connect(H, P, [binary, {active, false}]), - gen_tcp:send(C2, <<"AMQP", 0, 0, 9, 1>>), - {ok, <<1,0,0>>} = gen_tcp:recv(C2, 3, 100), + ok = rabbit_variable_queue:stop(), + {ok, _} = rabbit_variable_queue:start([]), - ok = rabbit_ct_helpers:info_action_t( - list_connections, rabbit_networking:connection_info_keys(), false, - ?TIMEOUT_LIST_OPS_PASS), + passed. - %% list consumers - ok = rabbit_ct_helpers:info_action_t( - list_consumers, rabbit_amqqueue:consumer_info_keys(), false, - ?TIMEOUT_LIST_OPS_PASS), +bq_variable_queue_delete_msg_store_files_callback(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, bq_variable_queue_delete_msg_store_files_callback1, [Config]). 
- %% list channels - ok = rabbit_ct_helpers:info_action_t( - list_channels, rabbit_channel:info_keys(), false, - ?TIMEOUT_LIST_OPS_PASS), +bq_variable_queue_delete_msg_store_files_callback1(Config) -> + ok = restart_msg_store_empty(), + {new, #amqqueue { pid = QPid, name = QName } = Q} = + rabbit_amqqueue:declare( + queue_name(Config, + <<"bq_variable_queue_delete_msg_store_files_callback-q">>), + true, false, [], none), + Payload = <<0:8388608>>, %% 1MB + Count = 30, + publish_and_confirm(Q, Payload, Count), - %% do some cleaning up - ok = rabbit_ct_helpers:control_action(delete_user, ["foo"]), - {error, {no_such_user, _}} = - rabbit_ct_helpers:control_action(delete_user, ["foo"]), + rabbit_amqqueue:set_ram_duration_target(QPid, 0), - ok = rabbit_ct_helpers:control_action(delete_vhost, ["/testhost"]), - {error, {no_such_vhost, _}} = - rabbit_ct_helpers:control_action(delete_vhost, ["/testhost"]), + {ok, Limiter} = rabbit_limiter:start_link(no_id), - %% close_connection - Conns = rabbit_networking:connections(), - [ok = rabbit_ct_helpers:control_action( - close_connection, [rabbit_misc:pid_to_string(ConnPid), "go away"]) - || ConnPid <- Conns], + CountMinusOne = Count - 1, + {ok, CountMinusOne, {QName, QPid, _AckTag, false, _Msg}} = + rabbit_amqqueue:basic_get(Q, self(), true, Limiter), + {ok, CountMinusOne} = rabbit_amqqueue:purge(Q), - %% cleanup queues - [{ok, _} = rabbit_amqqueue:delete(QR, false, false) || QR <- [Q, Q2]], + %% give the queue a second to receive the close_fds callback msg + timer:sleep(1000), - [begin - unlink(Chan), - ok = rabbit_channel:shutdown(Chan) - end || Chan <- [Ch1, Ch2]], + rabbit_amqqueue:delete(Q, false, false), + passed. + +bq_queue_recover(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, bq_queue_recover1, [Config]). 
+ +bq_queue_recover1(Config) -> + Count = 2 * rabbit_queue_index:next_segment_boundary(0), + {new, #amqqueue { pid = QPid, name = QName } = Q} = + rabbit_amqqueue:declare(queue_name(Config, <<"bq_queue_recover-q">>), + true, false, [], none), + publish_and_confirm(Q, <<>>, Count), + + SupPid = rabbit_ct_broker_helpers:get_queue_sup_pid(QPid), + true = is_pid(SupPid), + exit(SupPid, kill), + exit(QPid, kill), + MRef = erlang:monitor(process, QPid), + receive {'DOWN', MRef, process, QPid, _Info} -> ok + after 10000 -> exit(timeout_waiting_for_queue_death) + end, + rabbit_amqqueue:stop(), + rabbit_amqqueue:start(rabbit_amqqueue:recover()), + {ok, Limiter} = rabbit_limiter:start_link(no_id), + rabbit_amqqueue:with_or_die( + QName, + fun (Q1 = #amqqueue { pid = QPid1 }) -> + CountMinusOne = Count - 1, + {ok, CountMinusOne, {QName, QPid1, _AckTag, true, _Msg}} = + rabbit_amqqueue:basic_get(Q1, self(), false, Limiter), + exit(QPid1, shutdown), + VQ1 = variable_queue_init(Q, true), + {{_Msg1, true, _AckTag1}, VQ2} = + rabbit_variable_queue:fetch(true, VQ1), + CountMinusOne = rabbit_variable_queue:len(VQ2), + _VQ3 = rabbit_variable_queue:delete_and_terminate(shutdown, VQ2), + ok = rabbit_amqqueue:internal_delete(QName) + end), passed. + +variable_queue_dynamic_duration_change(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, variable_queue_dynamic_duration_change1, [Config]). + +variable_queue_dynamic_duration_change1(Config) -> + with_fresh_variable_queue( + fun variable_queue_dynamic_duration_change2/1, + ?config(variable_queue_type, Config)). 
+ +variable_queue_dynamic_duration_change2(VQ0) -> + SegmentSize = rabbit_queue_index:next_segment_boundary(0), + + %% start by sending in a couple of segments worth + Len = 2*SegmentSize, + VQ1 = variable_queue_publish(false, Len, VQ0), + %% squeeze and relax queue + Churn = Len div 32, + VQ2 = publish_fetch_and_ack(Churn, Len, VQ1), + + {Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2), + VQ7 = lists:foldl( + fun (Duration1, VQ4) -> + {_Duration, VQ5} = rabbit_variable_queue:ram_duration(VQ4), + VQ6 = variable_queue_set_ram_duration_target( + Duration1, VQ5), + publish_fetch_and_ack(Churn, Len, VQ6) + end, VQ3, [Duration / 4, 0, Duration / 4, infinity]), + + %% drain + {VQ8, AckTags} = variable_queue_fetch(Len, false, false, Len, VQ7), + {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags, VQ8), + {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9), + + VQ10. + +variable_queue_partial_segments_delta_thing(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, variable_queue_partial_segments_delta_thing1, [Config]). + +variable_queue_partial_segments_delta_thing1(Config) -> + with_fresh_variable_queue( + fun variable_queue_partial_segments_delta_thing2/1, + ?config(variable_queue_type, Config)). 
+ +variable_queue_partial_segments_delta_thing2(VQ0) -> + SegmentSize = rabbit_queue_index:next_segment_boundary(0), + HalfSegment = SegmentSize div 2, + OneAndAHalfSegment = SegmentSize + HalfSegment, + VQ1 = variable_queue_publish(true, OneAndAHalfSegment, VQ0), + {_Duration, VQ2} = rabbit_variable_queue:ram_duration(VQ1), + VQ3 = check_variable_queue_status( + variable_queue_set_ram_duration_target(0, VQ2), + %% one segment in q3, and half a segment in delta + [{delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}}, + {q3, SegmentSize}, + {len, SegmentSize + HalfSegment}]), + VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3), + VQ5 = check_variable_queue_status( + variable_queue_publish(true, 1, VQ4), + %% one alpha, but it's in the same segment as the deltas + [{q1, 1}, + {delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}}, + {q3, SegmentSize}, + {len, SegmentSize + HalfSegment + 1}]), + {VQ6, AckTags} = variable_queue_fetch(SegmentSize, true, false, + SegmentSize + HalfSegment + 1, VQ5), + VQ7 = check_variable_queue_status( + VQ6, + %% the half segment should now be in q3 + [{q1, 1}, + {delta, {delta, undefined, 0, undefined}}, + {q3, HalfSegment}, + {len, HalfSegment + 1}]), + {VQ8, AckTags1} = variable_queue_fetch(HalfSegment + 1, true, false, + HalfSegment + 1, VQ7), + {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags ++ AckTags1, VQ8), + %% should be empty now + {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9), + VQ10. + +variable_queue_all_the_bits_not_covered_elsewhere_A(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, variable_queue_all_the_bits_not_covered_elsewhere_A1, [Config]). + +variable_queue_all_the_bits_not_covered_elsewhere_A1(Config) -> + with_fresh_variable_queue( + fun variable_queue_all_the_bits_not_covered_elsewhere_A2/1, + ?config(variable_queue_type, Config)). 
+ +variable_queue_all_the_bits_not_covered_elsewhere_A2(VQ0) -> + Count = 2 * rabbit_queue_index:next_segment_boundary(0), + VQ1 = variable_queue_publish(true, Count, VQ0), + VQ2 = variable_queue_publish(false, Count, VQ1), + VQ3 = variable_queue_set_ram_duration_target(0, VQ2), + {VQ4, _AckTags} = variable_queue_fetch(Count, true, false, + Count + Count, VQ3), + {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false, + Count, VQ4), + _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5), + VQ7 = variable_queue_init(test_amqqueue(true), true), + {{_Msg1, true, _AckTag1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7), + Count1 = rabbit_variable_queue:len(VQ8), + VQ9 = variable_queue_publish(false, 1, VQ8), + VQ10 = variable_queue_set_ram_duration_target(0, VQ9), + {VQ11, _AckTags2} = variable_queue_fetch(Count1, true, true, Count, VQ10), + {VQ12, _AckTags3} = variable_queue_fetch(1, false, false, 1, VQ11), + VQ12. + +variable_queue_all_the_bits_not_covered_elsewhere_B(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, variable_queue_all_the_bits_not_covered_elsewhere_B1, [Config]). + +variable_queue_all_the_bits_not_covered_elsewhere_B1(Config) -> + with_fresh_variable_queue( + fun variable_queue_all_the_bits_not_covered_elsewhere_B2/1, + ?config(variable_queue_type, Config)). + +variable_queue_all_the_bits_not_covered_elsewhere_B2(VQ0) -> + VQ1 = variable_queue_set_ram_duration_target(0, VQ0), + VQ2 = variable_queue_publish(false, 4, VQ1), + {VQ3, AckTags} = variable_queue_fetch(2, false, false, 4, VQ2), + {_Guids, VQ4} = + rabbit_variable_queue:requeue(AckTags, VQ3), + VQ5 = rabbit_variable_queue:timeout(VQ4), + _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5), + VQ7 = variable_queue_init(test_amqqueue(true), true), + {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7), + VQ8. + +variable_queue_drop(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, variable_queue_drop1, [Config]). 
+
+variable_queue_drop1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_drop2/1,
+      ?config(variable_queue_type, Config)).
+
+variable_queue_drop2(VQ0) -> % drop with and without ack-required, then requeue the dropped message
+    %% start by sending a message
+    VQ1 = variable_queue_publish(false, 1, VQ0),
+    %% drop message with AckRequired = true
+    {{MsgId, AckTag}, VQ2} = rabbit_variable_queue:drop(true, VQ1),
+    true = rabbit_variable_queue:is_empty(VQ2),
+    true = AckTag =/= undefined, % fixed typo 'undefinded': the misspelled atom made this assertion vacuous
+    %% drop again -> empty
+    {empty, VQ3} = rabbit_variable_queue:drop(false, VQ2),
+    %% requeue
+    {[MsgId], VQ4} = rabbit_variable_queue:requeue([AckTag], VQ3),
+    %% drop message with AckRequired = false
+    {{MsgId, undefined}, VQ5} = rabbit_variable_queue:drop(false, VQ4),
+    true = rabbit_variable_queue:is_empty(VQ5),
+    VQ5.
+
+variable_queue_fold_msg_on_disk(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_fold_msg_on_disk1, [Config]).
+
+variable_queue_fold_msg_on_disk1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_fold_msg_on_disk2/1,
+      ?config(variable_queue_type, Config)).
+
+variable_queue_fold_msg_on_disk2(VQ0) -> % ackfold over a fetched persistent message
+    VQ1 = variable_queue_publish(true, 1, VQ0),
+    {VQ2, AckTags} = variable_queue_fetch(1, true, false, 1, VQ1),
+    {ok, VQ3} = rabbit_variable_queue:ackfold(fun (_M, _A, ok) -> ok end,
+                                              ok, VQ2, AckTags),
+    VQ3.
+
+variable_queue_dropfetchwhile(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_dropfetchwhile1, [Config]).
+
+variable_queue_dropfetchwhile1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_dropfetchwhile2/1,
+      ?config(variable_queue_type, Config)).
+ +variable_queue_dropfetchwhile2(VQ0) -> + Count = 10, + + %% add messages with sequential expiry + VQ1 = variable_queue_publish( + false, 1, Count, + fun (N, Props) -> Props#message_properties{expiry = N} end, + fun erlang:term_to_binary/1, VQ0), + + %% fetch the first 5 messages + {#message_properties{expiry = 6}, {Msgs, AckTags}, VQ2} = + rabbit_variable_queue:fetchwhile( + fun (#message_properties{expiry = Expiry}) -> Expiry =< 5 end, + fun (Msg, AckTag, {MsgAcc, AckAcc}) -> + {[Msg | MsgAcc], [AckTag | AckAcc]} + end, {[], []}, VQ1), + true = lists:seq(1, 5) == [msg2int(M) || M <- lists:reverse(Msgs)], + + %% requeue them + {_MsgIds, VQ3} = rabbit_variable_queue:requeue(AckTags, VQ2), + + %% drop the first 5 messages + {#message_properties{expiry = 6}, VQ4} = + rabbit_variable_queue:dropwhile( + fun (#message_properties {expiry = Expiry}) -> Expiry =< 5 end, VQ3), + + %% fetch 5 + VQ5 = lists:foldl(fun (N, VQN) -> + {{Msg, _, _}, VQM} = + rabbit_variable_queue:fetch(false, VQN), + true = msg2int(Msg) == N, + VQM + end, VQ4, lists:seq(6, Count)), + + %% should be empty now + true = rabbit_variable_queue:is_empty(VQ5), + + VQ5. + +variable_queue_dropwhile_varying_ram_duration(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, variable_queue_dropwhile_varying_ram_duration1, [Config]). + +variable_queue_dropwhile_varying_ram_duration1(Config) -> + with_fresh_variable_queue( + fun variable_queue_dropwhile_varying_ram_duration2/1, + ?config(variable_queue_type, Config)). + +variable_queue_dropwhile_varying_ram_duration2(VQ0) -> + test_dropfetchwhile_varying_ram_duration( + fun (VQ1) -> + {_, VQ2} = rabbit_variable_queue:dropwhile( + fun (_) -> false end, VQ1), + VQ2 + end, VQ0). + +variable_queue_fetchwhile_varying_ram_duration(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, variable_queue_fetchwhile_varying_ram_duration1, [Config]). 
+ +variable_queue_fetchwhile_varying_ram_duration1(Config) -> + with_fresh_variable_queue( + fun variable_queue_fetchwhile_varying_ram_duration2/1, + ?config(variable_queue_type, Config)). + +variable_queue_fetchwhile_varying_ram_duration2(VQ0) -> + test_dropfetchwhile_varying_ram_duration( + fun (VQ1) -> + {_, ok, VQ2} = rabbit_variable_queue:fetchwhile( + fun (_) -> false end, + fun (_, _, A) -> A end, + ok, VQ1), + VQ2 + end, VQ0). + +test_dropfetchwhile_varying_ram_duration(Fun, VQ0) -> + VQ1 = variable_queue_publish(false, 1, VQ0), + VQ2 = variable_queue_set_ram_duration_target(0, VQ1), + VQ3 = Fun(VQ2), + VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3), + VQ5 = variable_queue_publish(false, 1, VQ4), + VQ6 = Fun(VQ5), + VQ6. + +variable_queue_ack_limiting(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, variable_queue_ack_limiting1, [Config]). + +variable_queue_ack_limiting1(Config) -> + with_fresh_variable_queue( + fun variable_queue_ack_limiting2/1, + ?config(variable_queue_type, Config)). + +variable_queue_ack_limiting2(VQ0) -> + %% start by sending in a bunch of messages + Len = 1024, + VQ1 = variable_queue_publish(false, Len, VQ0), + + %% squeeze and relax queue + Churn = Len div 32, + VQ2 = publish_fetch_and_ack(Churn, Len, VQ1), + + %% update stats for duration + {_Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2), + + %% fetch half the messages + {VQ4, _AckTags} = variable_queue_fetch(Len div 2, false, false, Len, VQ3), + + VQ5 = check_variable_queue_status( + VQ4, [{len, Len div 2}, + {messages_unacknowledged_ram, Len div 2}, + {messages_ready_ram, Len div 2}, + {messages_ram, Len}]), + + %% ensure all acks go to disk on 0 duration target + VQ6 = check_variable_queue_status( + variable_queue_set_ram_duration_target(0, VQ5), + [{len, Len div 2}, + {target_ram_count, 0}, + {messages_unacknowledged_ram, 0}, + {messages_ready_ram, 0}, + {messages_ram, 0}]), + + VQ6. 
+ +variable_queue_purge(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, variable_queue_purge1, [Config]). + +variable_queue_purge1(Config) -> + with_fresh_variable_queue( + fun variable_queue_purge2/1, + ?config(variable_queue_type, Config)). + +variable_queue_purge2(VQ0) -> + LenDepth = fun (VQ) -> + {rabbit_variable_queue:len(VQ), + rabbit_variable_queue:depth(VQ)} + end, + VQ1 = variable_queue_publish(false, 10, VQ0), + {VQ2, Acks} = variable_queue_fetch(6, false, false, 10, VQ1), + {4, VQ3} = rabbit_variable_queue:purge(VQ2), + {0, 6} = LenDepth(VQ3), + {_, VQ4} = rabbit_variable_queue:requeue(lists:sublist(Acks, 2), VQ3), + {2, 6} = LenDepth(VQ4), + VQ5 = rabbit_variable_queue:purge_acks(VQ4), + {2, 2} = LenDepth(VQ5), + VQ5. + +variable_queue_requeue(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, variable_queue_requeue1, [Config]). + +variable_queue_requeue1(Config) -> + with_fresh_variable_queue( + fun variable_queue_requeue2/1, + ?config(variable_queue_type, Config)). + +variable_queue_requeue2(VQ0) -> + {_PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} = + variable_queue_with_holes(VQ0), + Msgs = + lists:zip(RequeuedMsgs, + lists:duplicate(length(RequeuedMsgs), true)) ++ + lists:zip(FreshMsgs, + lists:duplicate(length(FreshMsgs), false)), + VQ2 = lists:foldl(fun ({I, Requeued}, VQa) -> + {{M, MRequeued, _}, VQb} = + rabbit_variable_queue:fetch(true, VQa), + Requeued = MRequeued, %% assertion + I = msg2int(M), %% assertion + VQb + end, VQ1, Msgs), + {empty, VQ3} = rabbit_variable_queue:fetch(true, VQ2), + VQ3. + +%% requeue from ram_pending_ack into q3, move to delta and then empty queue +variable_queue_requeue_ram_beta(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, variable_queue_requeue_ram_beta1, [Config]). + +variable_queue_requeue_ram_beta1(Config) -> + with_fresh_variable_queue( + fun variable_queue_requeue_ram_beta2/1, + ?config(variable_queue_type, Config)). 
+ +variable_queue_requeue_ram_beta2(VQ0) -> + Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2, + VQ1 = variable_queue_publish(false, Count, VQ0), + {VQ2, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ1), + {Back, Front} = lists:split(Count div 2, AcksR), + {_, VQ3} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ2), + VQ4 = variable_queue_set_ram_duration_target(0, VQ3), + {_, VQ5} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ4), + VQ6 = requeue_one_by_one(Front, VQ5), + {VQ7, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ6), + {_, VQ8} = rabbit_variable_queue:ack(AcksAll, VQ7), + VQ8. + +variable_queue_fold(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, variable_queue_fold1, [Config]). + +variable_queue_fold1(Config) -> + with_fresh_variable_queue( + fun variable_queue_fold2/1, + ?config(variable_queue_type, Config)). + +variable_queue_fold2(VQ0) -> + {PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} = + variable_queue_with_holes(VQ0), + Count = rabbit_variable_queue:depth(VQ1), + Msgs = lists:sort(PendingMsgs ++ RequeuedMsgs ++ FreshMsgs), + lists:foldl(fun (Cut, VQ2) -> + test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ2) + end, VQ1, [0, 1, 2, Count div 2, + Count - 1, Count, Count + 1, Count * 2]). + +test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ0) -> + {Acc, VQ1} = rabbit_variable_queue:fold( + fun (M, _, Pending, A) -> + MInt = msg2int(M), + Pending = lists:member(MInt, PendingMsgs), %% assert + case MInt =< Cut of + true -> {cont, [MInt | A]}; + false -> {stop, A} + end + end, [], VQ0), + Expected = lists:takewhile(fun (I) -> I =< Cut end, Msgs), + Expected = lists:reverse(Acc), %% assertion + VQ1. + +variable_queue_batch_publish(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, variable_queue_batch_publish1, [Config]). 
+ +variable_queue_batch_publish1(Config) -> + with_fresh_variable_queue( + fun variable_queue_batch_publish2/1, + ?config(variable_queue_type, Config)). + +variable_queue_batch_publish2(VQ) -> + Count = 10, + VQ1 = variable_queue_batch_publish(true, Count, VQ), + Count = rabbit_variable_queue:len(VQ1), + VQ1. + +variable_queue_batch_publish_delivered(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, variable_queue_batch_publish_delivered1, [Config]). + +variable_queue_batch_publish_delivered1(Config) -> + with_fresh_variable_queue( + fun variable_queue_batch_publish_delivered2/1, + ?config(variable_queue_type, Config)). + +variable_queue_batch_publish_delivered2(VQ) -> + Count = 10, + VQ1 = variable_queue_batch_publish_delivered(true, Count, VQ), + Count = rabbit_variable_queue:depth(VQ1), + VQ1. + +%% same as test_variable_queue_requeue_ram_beta but randomly changing +%% the queue mode after every step. +variable_queue_mode_change(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, variable_queue_mode_change1, [Config]). + +variable_queue_mode_change1(Config) -> + with_fresh_variable_queue( + fun variable_queue_mode_change2/1, + ?config(variable_queue_type, Config)). 
+
+variable_queue_mode_change2(VQ0) -> % same steps as variable_queue_requeue_ram_beta2 but with a random mode flip between each step
+    Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2,
+    VQ1 = variable_queue_publish(false, Count, VQ0),
+    VQ2 = maybe_switch_queue_mode(VQ1),
+    {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2),
+    VQ4 = maybe_switch_queue_mode(VQ3),
+    {Back, Front} = lists:split(Count div 2, AcksR),
+    {_, VQ5} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ4),
+    VQ6 = maybe_switch_queue_mode(VQ5),
+    VQ7 = variable_queue_set_ram_duration_target(0, VQ6),
+    VQ8 = maybe_switch_queue_mode(VQ7),
+    {_, VQ9} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ8),
+    VQ10 = maybe_switch_queue_mode(VQ9),
+    VQ11 = requeue_one_by_one(Front, VQ10),
+    VQ12 = maybe_switch_queue_mode(VQ11),
+    {VQ13, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ12),
+    VQ14 = maybe_switch_queue_mode(VQ13),
+    {_, VQ15} = rabbit_variable_queue:ack(AcksAll, VQ14),
+    VQ16 = maybe_switch_queue_mode(VQ15),
+    VQ16.
+
+maybe_switch_queue_mode(VQ) -> % despite the name, always sets a mode; "maybe" refers to the mode possibly being unchanged
+    Mode = random_queue_mode(),
+    set_queue_mode(Mode, VQ).
+
+random_queue_mode() -> % NOTE(review): 'random' module is deprecated since OTP 19; rand:uniform/1 is the modern equivalent
+    Modes = [lazy, default],
+    lists:nth(random:uniform(length(Modes)), Modes).
+
+pub_res({_, VQS}) -> % unwrap the VQ from either an {Other, VQ} publish result or a bare VQ
+    VQS;
+pub_res(VQS) ->
+    VQS.
+
+make_publish(IsPersistent, PayloadFun, PropFun, N) -> % build a {Msg, Props, IsDelivered=false} publish tuple for batch_publish/4
+    {rabbit_basic:message(
+       rabbit_misc:r(<<>>, exchange, <<>>),
+       <<>>, #'P_basic'{delivery_mode = case IsPersistent of
+                                            true  -> 2;
+                                            false -> 1
+                                        end},
+       PayloadFun(N)),
+     PropFun(N, #message_properties{size = 10}),
+     false}.
+
+make_publish_delivered(IsPersistent, PayloadFun, PropFun, N) -> % as make_publish/4 but a 2-tuple (no delivered flag) for batch_publish_delivered/4
+    {rabbit_basic:message(
+       rabbit_misc:r(<<>>, exchange, <<>>),
+       <<>>, #'P_basic'{delivery_mode = case IsPersistent of
+                                            true  -> 2;
+                                            false -> 1
+                                        end},
+       PayloadFun(N)),
+     PropFun(N, #message_properties{size = 10})}.
+
+queue_name(Config, Name) -> % qualify Name with the current testcase name
+    Name1 = rabbit_ct_helpers:config_to_testcase_name(Config, Name),
+    queue_name(Name1).
+
+queue_name(Name) -> % queue resource record in the default vhost
+    rabbit_misc:r(<<"/">>, queue, Name).
+ +test_queue() -> + queue_name(<<"test">>). + +init_test_queue() -> + TestQueue = test_queue(), + PRef = rabbit_guid:gen(), + PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef), + Res = rabbit_queue_index:recover( + TestQueue, [], false, + fun (MsgId) -> + rabbit_msg_store:contains(MsgId, PersistentClient) + end, + fun nop/1, fun nop/1), + ok = rabbit_msg_store:client_delete_and_terminate(PersistentClient), + Res. + +restart_test_queue(Qi) -> + _ = rabbit_queue_index:terminate([], Qi), + ok = rabbit_variable_queue:stop(), + {ok, _} = rabbit_variable_queue:start([test_queue()]), + init_test_queue(). + +empty_test_queue() -> + ok = rabbit_variable_queue:stop(), + {ok, _} = rabbit_variable_queue:start([]), + {0, 0, Qi} = init_test_queue(), + _ = rabbit_queue_index:delete_and_terminate(Qi), + ok. + +with_empty_test_queue(Fun) -> + ok = empty_test_queue(), + {0, 0, Qi} = init_test_queue(), + rabbit_queue_index:delete_and_terminate(Fun(Qi)). + +restart_app() -> + rabbit:stop(), + rabbit:start(). + +queue_index_publish(SeqIds, Persistent, Qi) -> + Ref = rabbit_guid:gen(), + MsgStore = case Persistent of + true -> ?PERSISTENT_MSG_STORE; + false -> ?TRANSIENT_MSG_STORE + end, + MSCState = msg_store_client_init(MsgStore, Ref), + {A, B = [{_SeqId, LastMsgIdWritten} | _]} = + lists:foldl( + fun (SeqId, {QiN, SeqIdsMsgIdsAcc}) -> + MsgId = rabbit_guid:gen(), + QiM = rabbit_queue_index:publish( + MsgId, SeqId, #message_properties{size = 10}, + Persistent, infinity, QiN), + ok = rabbit_msg_store:write(MsgId, MsgId, MSCState), + {QiM, [{SeqId, MsgId} | SeqIdsMsgIdsAcc]} + end, {Qi, []}, SeqIds), + %% do this just to force all of the publishes through to the msg_store: + true = rabbit_msg_store:contains(LastMsgIdWritten, MSCState), + ok = rabbit_msg_store:client_delete_and_terminate(MSCState), + {A, B}. 
+
+verify_read_with_published(_Delivered, _Persistent, [], _) -> % all read entries matched the published list
+    ok;
+verify_read_with_published(Delivered, Persistent,
+                           [{MsgId, SeqId, _Props, Persistent, Delivered}|Read],
+                           [{SeqId, MsgId}|Published]) -> % entry agrees on id, seq, persistence and delivered flag
+    verify_read_with_published(Delivered, Persistent, Read, Published);
+verify_read_with_published(_Delivered, _Persistent, _Read, _Published) ->
+    ko. % mismatch sentinel: callers match on 'ok', so 'ko' fails their assertion
+
+nop(_) -> ok. % no-op callbacks handed to queue index / variable queue init
+nop(_, _) -> ok.
+
+msg_store_client_init(MsgStore, Ref) -> % msg_store client with no callbacks
+    rabbit_msg_store:client_init(MsgStore, Ref, undefined, undefined).
+
+variable_queue_init(Q, Recover) -> % init a vq; Recover=true simulates a non-clean shutdown
+    rabbit_variable_queue:init(
+      Q, case Recover of
+             true  -> non_clean_shutdown;
+             false -> new
+         end, fun nop/2, fun nop/2, fun nop/1, fun nop/1).
+
+publish_and_confirm(Q, Payload, Count) -> % publish Count persistent confirm-mode messages and wait for all confirms
+    Seqs = lists:seq(1, Count),
+    [begin
+         Msg = rabbit_basic:message(rabbit_misc:r(<<>>, exchange, <<>>),
+                                    <<>>, #'P_basic'{delivery_mode = 2},
+                                    Payload),
+         Delivery = #delivery{mandatory = false, sender = self(),
+                              confirm = true, message = Msg, msg_seq_no = Seq,
+                              flow = noflow},
+         _QPids = rabbit_amqqueue:deliver([Q], Delivery)
+     end || Seq <- Seqs],
+    wait_for_confirms(gb_sets:from_list(Seqs)).
+
+wait_for_confirms(Unconfirmed) -> % drain confirm casts until every seq no is confirmed; exits after ?TIMEOUT of silence
+    case gb_sets:is_empty(Unconfirmed) of
+        true  -> ok;
+        false -> receive {'$gen_cast', {confirm, Confirmed, _}} ->
+                         wait_for_confirms(
+                           rabbit_misc:gb_sets_difference(
+                             Unconfirmed, gb_sets:from_list(Confirmed)))
+                 after ?TIMEOUT -> exit(timeout_waiting_for_confirm)
+                 end
+    end.
+ +with_fresh_variable_queue(Fun, Mode) -> + Ref = make_ref(), + Me = self(), + %% Run in a separate process since rabbit_msg_store will send + %% bump_credit messages and we want to ignore them + spawn_link(fun() -> + ok = empty_test_queue(), + VQ = variable_queue_init(test_amqqueue(true), false), + S0 = variable_queue_status(VQ), + assert_props(S0, [{q1, 0}, {q2, 0}, + {delta, + {delta, undefined, 0, undefined}}, + {q3, 0}, {q4, 0}, + {len, 0}]), + VQ1 = set_queue_mode(Mode, VQ), + try + _ = rabbit_variable_queue:delete_and_terminate( + shutdown, Fun(VQ1)), + Me ! Ref + catch + Type:Error -> + Me ! {Ref, Type, Error, erlang:get_stacktrace()} + end + end), + receive + Ref -> ok; + {Ref, Type, Error, ST} -> exit({Type, Error, ST}) + end, + passed. + +set_queue_mode(Mode, VQ) -> + VQ1 = rabbit_variable_queue:set_queue_mode(Mode, VQ), + S1 = variable_queue_status(VQ1), + assert_props(S1, [{mode, Mode}]), + VQ1. + +variable_queue_publish(IsPersistent, Count, VQ) -> + variable_queue_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ). + +variable_queue_publish(IsPersistent, Count, PropFun, VQ) -> + variable_queue_publish(IsPersistent, 1, Count, PropFun, + fun (_N) -> <<>> end, VQ). + +variable_queue_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) -> + variable_queue_wait_for_shuffling_end( + lists:foldl( + fun (N, VQN) -> + rabbit_variable_queue:publish( + rabbit_basic:message( + rabbit_misc:r(<<>>, exchange, <<>>), + <<>>, #'P_basic'{delivery_mode = case IsPersistent of + true -> 2; + false -> 1 + end}, + PayloadFun(N)), + PropFun(N, #message_properties{size = 10}), + false, self(), noflow, VQN) + end, VQ, lists:seq(Start, Start + Count - 1))). + +variable_queue_batch_publish(IsPersistent, Count, VQ) -> + variable_queue_batch_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ). + +variable_queue_batch_publish(IsPersistent, Count, PropFun, VQ) -> + variable_queue_batch_publish(IsPersistent, 1, Count, PropFun, + fun (_N) -> <<>> end, VQ). 
+
+variable_queue_batch_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
+    variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun,
+                                  PayloadFun, fun make_publish/4,
+                                  fun rabbit_variable_queue:batch_publish/4,
+                                  VQ).
+
+variable_queue_batch_publish_delivered(IsPersistent, Count, VQ) ->
+    variable_queue_batch_publish_delivered(IsPersistent, Count, fun (_N, P) -> P end, VQ).
+
+variable_queue_batch_publish_delivered(IsPersistent, Count, PropFun, VQ) ->
+    variable_queue_batch_publish_delivered(IsPersistent, 1, Count, PropFun,
+                                           fun (_N) -> <<>> end, VQ).
+
+variable_queue_batch_publish_delivered(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
+    variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun,
+                                  PayloadFun, fun make_publish_delivered/4,
+                                  fun rabbit_variable_queue:batch_publish_delivered/4,
+                                  VQ).
+
+variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun, PayloadFun,
+                              MakePubFun, PubFun, VQ) -> % shared driver for both batch-publish variants
+    Publishes =
+        [MakePubFun(IsPersistent, PayloadFun, PropFun, N)
+         || N <- lists:seq(Start, Start + Count - 1)],
+    Res = PubFun(Publishes, self(), noflow, VQ),
+    VQ1 = pub_res(Res),
+    variable_queue_wait_for_shuffling_end(VQ1).
+
+variable_queue_fetch(Count, IsPersistent, IsDelivered, Len, VQ) -> % fetch Count msgs, asserting flags and remaining length on each step
+    lists:foldl(fun (N, {VQN, AckTagsAcc}) ->
+                        Rem = Len - N,
+                        {{#basic_message { is_persistent = IsPersistent },
+                          IsDelivered, AckTagN}, VQM} =
+                            rabbit_variable_queue:fetch(true, VQN),
+                        Rem = rabbit_variable_queue:len(VQM),
+                        {VQM, [AckTagN | AckTagsAcc]}
+                end, {VQ, []}, lists:seq(1, Count)).
+
+test_amqqueue(Durable) ->
+    (rabbit_amqqueue:pseudo_queue(test_queue(), self()))
+        #amqqueue { durable = Durable }.
+
+assert_prop(List, Prop, Value) -> % exit on mismatch: previously returned a tuple that callers discarded, so failures passed silently
+    case proplists:get_value(Prop, List) of
+        Value -> ok;
+        _     -> exit({assert_prop_failed, Prop, exp, Value, List})
+    end.
+
+assert_props(List, PropVals) ->
+    [assert_prop(List, Prop, Value) || {Prop, Value} <- PropVals].
+
+variable_queue_set_ram_duration_target(Duration, VQ) -> % set target and wait for any resulting disk shuffling to finish
+    variable_queue_wait_for_shuffling_end(
+      rabbit_variable_queue:set_ram_duration_target(Duration, VQ)).
+
+publish_fetch_and_ack(0, _Len, VQ0) ->
+    VQ0;
+publish_fetch_and_ack(N, Len, VQ0) -> % churn: publish one, fetch one, ack it, N times
+    VQ1 = variable_queue_publish(false, 1, VQ0),
+    {{_Msg, false, AckTag}, VQ2} = rabbit_variable_queue:fetch(true, VQ1),
+    Len = rabbit_variable_queue:len(VQ2), % assertion: churn leaves the queue length unchanged
+    {_Guids, VQ3} = rabbit_variable_queue:ack([AckTag], VQ2),
+    publish_fetch_and_ack(N-1, Len, VQ3).
+
+variable_queue_status(VQ) -> % proplist of all vq info items, with backing_queue_status flattened in
+    Keys = rabbit_backing_queue:info_keys() -- [backing_queue_status],
+    [{K, rabbit_variable_queue:info(K, VQ)} || K <- Keys] ++
+        rabbit_variable_queue:info(backing_queue_status, VQ).
+
+variable_queue_wait_for_shuffling_end(VQ) -> % pump credit_flow bump messages until the vq is unblocked
+    case credit_flow:blocked() of
+        false -> VQ;
+        true  -> receive
+                     {bump_credit, Msg} ->
+                         credit_flow:handle_bump_msg(Msg),
+                         variable_queue_wait_for_shuffling_end(
+                           rabbit_variable_queue:resume(VQ))
+                 end
+    end.
+
+msg2int(#basic_message{content = #content{ payload_fragments_rev = P}}) -> % recover the integer term_to_binary'd into the payload
+    binary_to_term(list_to_binary(lists:reverse(P))).
+
+ack_subset(AckSeqs, Interval, Rem) -> % every Interval-th {Ack, N} pair, offset by Rem
+    lists:filter(fun ({_Ack, N}) -> (N + Rem) rem Interval == 0 end, AckSeqs).
+
+requeue_one_by_one(Acks, VQ) -> % requeue each ack tag in its own call rather than as a batch
+    lists:foldl(fun (AckTag, VQN) ->
+                        {_MsgId, VQM} = rabbit_variable_queue:requeue(
+                                          [AckTag], VQN),
+                        VQM
+                end, VQ, Acks).
+
+%% Create a vq with messages in q1, delta, and q3, and holes (in the
+%% form of pending acks) in the latter two.
+variable_queue_with_holes(VQ0) -> % returns {PendingMsgs, RequeuedMsgs, FreshMsgs, VQ}
+    Interval = 2048, %% should match vq:IO_BATCH_SIZE
+    Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2 * Interval,
+    Seq = lists:seq(1, Count),
+    VQ1 = variable_queue_set_ram_duration_target(0, VQ0),
+    VQ2 = variable_queue_publish(
+            false, 1, Count,
+            fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ1),
+    {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2),
+    Acks = lists:reverse(AcksR),
+    AckSeqs = lists:zip(Acks, Seq),
+    [{Subset1, _Seq1}, {Subset2, _Seq2}, {Subset3, Seq3}] =
+        [lists:unzip(ack_subset(AckSeqs, Interval, I)) || I <- [0, 1, 2]], % three disjoint strided subsets of the acks
+    %% we requeue in three phases in order to exercise requeuing logic
+    %% in various vq states
+    {_MsgIds, VQ4} = rabbit_variable_queue:requeue(
+                       Acks -- (Subset1 ++ Subset2 ++ Subset3), VQ3),
+    VQ5 = requeue_one_by_one(Subset1, VQ4),
+    %% by now we have some messages (and holes) in delta
+    VQ6 = requeue_one_by_one(Subset2, VQ5),
+    VQ7 = variable_queue_set_ram_duration_target(infinity, VQ6),
+    %% add the q1 tail
+    VQ8 = variable_queue_publish(
+            true, Count + 1, Interval,
+            fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ7),
+    %% assertions
+    Status = variable_queue_status(VQ8),
+    vq_with_holes_assertions(VQ8, proplists:get_value(mode, Status)),
+    Depth = Count + Interval,
+    Depth = rabbit_variable_queue:depth(VQ8), % assertion
+    Len = Depth - length(Subset3),
+    Len = rabbit_variable_queue:len(VQ8), % assertion: Subset3 is still pending ack
+    {Seq3, Seq -- Seq3, lists:seq(Count + 1, Count + Interval), VQ8}.
+
+vq_with_holes_assertions(VQ, default) -> % default mode: q1, delta and q3 must all be non-empty
+    [false =
+         case V of
+             {delta, _, 0, _} -> true;
+             0                -> true;
+             _                -> false
+         end || {K, V} <- variable_queue_status(VQ),
+                lists:member(K, [q1, delta, q3])];
+vq_with_holes_assertions(VQ, lazy) -> % lazy mode: only delta is required to be non-empty
+    [false =
+         case V of
+             {delta, _, 0, _} -> true;
+             _ -> false
+         end || {K, V} <- variable_queue_status(VQ),
+                lists:member(K, [delta])].
+ +check_variable_queue_status(VQ0, Props) -> + VQ1 = variable_queue_wait_for_shuffling_end(VQ0), + S = variable_queue_status(VQ1), + assert_props(S, Props), + VQ1. + +%% --------------------------------------------------------------------------- +%% Credit flow. +%% --------------------------------------------------------------------------- + +credit_flow_settings(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, credit_flow_settings1, [Config]). + +credit_flow_settings1(_Config) -> + %% default values + passed = test_proc(200, 50), + + application:set_env(rabbit, credit_flow_default_credit, {100, 20}), + passed = test_proc(100, 20), + + application:unset_env(rabbit, credit_flow_default_credit), + + % back to defaults + passed = test_proc(200, 50), + passed. + +test_proc(InitialCredit, MoreCreditAfter) -> + Pid = spawn(fun dummy/0), + Pid ! {credit, self()}, + {InitialCredit, MoreCreditAfter} = + receive + {credit, Val} -> Val + end, + passed. + +dummy() -> + credit_flow:send(self()), + receive + {credit, From} -> + From ! {credit, get(credit_flow_default_credit)}; + _ -> + dummy() + end. + +%% ------------------------------------------------------------------- +%% dynamic_mirroring. +%% ------------------------------------------------------------------- + +dynamic_mirroring(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, dynamic_mirroring1, [Config]). + +dynamic_mirroring1(_Config) -> + %% Just unit tests of the node selection logic, see multi node + %% tests for the rest... 
+ Test = fun ({NewM, NewSs, ExtraSs}, Policy, Params, + {MNode, SNodes, SSNodes}, All) -> + {ok, M} = rabbit_mirror_queue_misc:module(Policy), + {NewM, NewSs0} = M:suggested_queue_nodes( + Params, MNode, SNodes, SSNodes, All), + NewSs1 = lists:sort(NewSs0), + case dm_list_match(NewSs, NewSs1, ExtraSs) of + ok -> ok; + error -> exit({no_match, NewSs, NewSs1, ExtraSs}) + end + end, + + Test({a,[b,c],0},<<"all">>,'_',{a,[], []}, [a,b,c]), + Test({a,[b,c],0},<<"all">>,'_',{a,[b,c],[b,c]},[a,b,c]), + Test({a,[b,c],0},<<"all">>,'_',{a,[d], [d]}, [a,b,c]), + + N = fun (Atoms) -> [list_to_binary(atom_to_list(A)) || A <- Atoms] end, + + %% Add a node + Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[b],[b]},[a,b,c,d]), + Test({b,[a,c],0},<<"nodes">>,N([a,b,c]),{b,[a],[a]},[a,b,c,d]), + %% Add two nodes and drop one + Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[d],[d]},[a,b,c,d]), + %% Don't try to include nodes that are not running + Test({a,[b], 0},<<"nodes">>,N([a,b,f]),{a,[b],[b]},[a,b,c,d]), + %% If we can't find any of the nodes listed then just keep the master + Test({a,[], 0},<<"nodes">>,N([f,g,h]),{a,[b],[b]},[a,b,c,d]), + %% And once that's happened, still keep the master even when not listed, + %% if nothing is synced + Test({a,[b,c],0},<<"nodes">>,N([b,c]), {a,[], []}, [a,b,c,d]), + Test({a,[b,c],0},<<"nodes">>,N([b,c]), {a,[b],[]}, [a,b,c,d]), + %% But if something is synced we can lose the master - but make + %% sure we pick the new master from the nodes which are synced! + Test({b,[c], 0},<<"nodes">>,N([b,c]), {a,[b],[b]},[a,b,c,d]), + Test({b,[c], 0},<<"nodes">>,N([c,b]), {a,[b],[b]},[a,b,c,d]), + + Test({a,[], 1},<<"exactly">>,2,{a,[], []}, [a,b,c,d]), + Test({a,[], 2},<<"exactly">>,3,{a,[], []}, [a,b,c,d]), + Test({a,[c], 0},<<"exactly">>,2,{a,[c], [c]}, [a,b,c,d]), + Test({a,[c], 1},<<"exactly">>,3,{a,[c], [c]}, [a,b,c,d]), + Test({a,[c], 0},<<"exactly">>,2,{a,[c,d],[c,d]},[a,b,c,d]), + Test({a,[c,d],0},<<"exactly">>,3,{a,[c,d],[c,d]},[a,b,c,d]), + + passed. 
+ +%% Does the first list match the second where the second is required +%% to have exactly Extra superfluous items? +dm_list_match([], [], 0) -> ok; +dm_list_match(_, [], _Extra) -> error; +dm_list_match([H|T1], [H |T2], Extra) -> dm_list_match(T1, T2, Extra); +dm_list_match(L1, [_H|T2], Extra) -> dm_list_match(L1, T2, Extra - 1). + +%% --------------------------------------------------------------------------- +%% file_handle_cache. +%% --------------------------------------------------------------------------- + +file_handle_cache(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, file_handle_cache1, [Config]). + +file_handle_cache1(_Config) -> + %% test copying when there is just one spare handle + Limit = file_handle_cache:get_limit(), + ok = file_handle_cache:set_limit(5), %% 1 or 2 sockets, 2 msg_stores + TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"), + ok = filelib:ensure_dir(filename:join(TmpDir, "nothing")), + [Src1, Dst1, Src2, Dst2] = Files = + [filename:join(TmpDir, Str) || Str <- ["file1", "file2", "file3", "file4"]], + Content = <<"foo">>, + CopyFun = fun (Src, Dst) -> + {ok, Hdl} = prim_file:open(Src, [binary, write]), + ok = prim_file:write(Hdl, Content), + ok = prim_file:sync(Hdl), + prim_file:close(Hdl), + + {ok, SrcHdl} = file_handle_cache:open(Src, [read], []), + {ok, DstHdl} = file_handle_cache:open(Dst, [write], []), + Size = size(Content), + {ok, Size} = file_handle_cache:copy(SrcHdl, DstHdl, Size), + ok = file_handle_cache:delete(SrcHdl), + ok = file_handle_cache:delete(DstHdl) + end, + Pid = spawn(fun () -> {ok, Hdl} = file_handle_cache:open( + filename:join(TmpDir, "file5"), + [write], []), + receive {next, Pid1} -> Pid1 ! {next, self()} end, + file_handle_cache:delete(Hdl), + %% This will block and never return, so we + %% exercise the fhc tidying up the pending + %% queue on the death of a process. + ok = CopyFun(Src1, Dst1) + end), + ok = CopyFun(Src1, Dst1), + ok = file_handle_cache:set_limit(2), + Pid ! 
{next, self()}, + receive {next, Pid} -> ok end, + timer:sleep(100), + Pid1 = spawn(fun () -> CopyFun(Src2, Dst2) end), + timer:sleep(100), + erlang:monitor(process, Pid), + erlang:monitor(process, Pid1), + exit(Pid, kill), + exit(Pid1, kill), + receive {'DOWN', _MRef, process, Pid, _Reason} -> ok end, + receive {'DOWN', _MRef1, process, Pid1, _Reason1} -> ok end, + [file:delete(File) || File <- Files], + ok = file_handle_cache:set_limit(Limit), + passed. + +%% ------------------------------------------------------------------- +%% Log management. +%% ------------------------------------------------------------------- + +log_management(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, log_management1, [Config]). + +log_management1(_Config) -> + override_group_leader(), + + MainLog = rabbit:log_location(kernel), + SaslLog = rabbit:log_location(sasl), + Suffix = ".1", + + ok = test_logs_working(MainLog, SaslLog), + + %% prepare basic logs + file:delete([MainLog, Suffix]), + file:delete([SaslLog, Suffix]), + + %% simple logs reopening + ok = control_action(rotate_logs, []), + ok = test_logs_working(MainLog, SaslLog), + + %% simple log rotation + ok = control_action(rotate_logs, [Suffix]), + [true, true] = non_empty_files([[MainLog, Suffix], [SaslLog, Suffix]]), + [true, true] = empty_files([MainLog, SaslLog]), + ok = test_logs_working(MainLog, SaslLog), + + %% reopening logs with log rotation performed first + ok = clean_logs([MainLog, SaslLog], Suffix), + ok = control_action(rotate_logs, []), + ok = file:rename(MainLog, [MainLog, Suffix]), + ok = file:rename(SaslLog, [SaslLog, Suffix]), + ok = test_logs_working([MainLog, Suffix], [SaslLog, Suffix]), + ok = control_action(rotate_logs, []), + ok = test_logs_working(MainLog, SaslLog), + + %% log rotation on empty files (the main log will have a ctl action logged) + ok = clean_logs([MainLog, SaslLog], Suffix), + ok = control_action(rotate_logs, []), + ok = control_action(rotate_logs, [Suffix]), + 
[false, true] = empty_files([[MainLog, Suffix], [SaslLog, Suffix]]), + + %% logs with suffix are not writable + ok = control_action(rotate_logs, [Suffix]), + ok = make_files_non_writable([[MainLog, Suffix], [SaslLog, Suffix]]), + ok = control_action(rotate_logs, [Suffix]), + ok = test_logs_working(MainLog, SaslLog), + + %% logging directed to tty (first, remove handlers) + ok = delete_log_handlers([rabbit_sasl_report_file_h, + rabbit_error_logger_file_h]), + ok = clean_logs([MainLog, SaslLog], Suffix), + ok = application:set_env(rabbit, sasl_error_logger, tty), + ok = application:set_env(rabbit, error_logger, tty), + ok = control_action(rotate_logs, []), + [{error, enoent}, {error, enoent}] = empty_files([MainLog, SaslLog]), + + %% rotate logs when logging is turned off + ok = application:set_env(rabbit, sasl_error_logger, false), + ok = application:set_env(rabbit, error_logger, silent), + ok = control_action(rotate_logs, []), + [{error, enoent}, {error, enoent}] = empty_files([MainLog, SaslLog]), + + %% cleanup + ok = application:set_env(rabbit, sasl_error_logger, {file, SaslLog}), + ok = application:set_env(rabbit, error_logger, {file, MainLog}), + ok = add_log_handlers([{rabbit_error_logger_file_h, MainLog}, + {rabbit_sasl_report_file_h, SaslLog}]), + passed. + +log_management_during_startup(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, log_management_during_startup1, [Config]). 
+ +log_management_during_startup1(_Config) -> + MainLog = rabbit:log_location(kernel), + SaslLog = rabbit:log_location(sasl), + + %% start application with simple tty logging + ok = control_action(stop_app, []), + ok = application:set_env(rabbit, error_logger, tty), + ok = application:set_env(rabbit, sasl_error_logger, tty), + ok = add_log_handlers([{error_logger_tty_h, []}, + {sasl_report_tty_h, []}]), + ok = control_action(start_app, []), + + %% start application with tty logging and + %% proper handlers not installed + ok = control_action(stop_app, []), + ok = error_logger:tty(false), + ok = delete_log_handlers([sasl_report_tty_h]), + ok = case catch control_action(start_app, []) of + ok -> exit({got_success_but_expected_failure, + log_rotation_tty_no_handlers_test}); + {badrpc, {'EXIT', {error, + {cannot_log_to_tty, _, not_installed}}}} -> ok + end, + + %% fix sasl logging + ok = application:set_env(rabbit, sasl_error_logger, {file, SaslLog}), + + %% start application with logging to non-existing directory + TmpLog = "/tmp/rabbit-tests/test.log", + delete_file(TmpLog), + ok = control_action(stop_app, []), + ok = application:set_env(rabbit, error_logger, {file, TmpLog}), + + ok = delete_log_handlers([rabbit_error_logger_file_h]), + ok = add_log_handlers([{error_logger_file_h, MainLog}]), + ok = control_action(start_app, []), + + %% start application with logging to directory with no + %% write permissions + ok = control_action(stop_app, []), + TmpDir = "/tmp/rabbit-tests", + ok = set_permissions(TmpDir, 8#00400), + ok = delete_log_handlers([rabbit_error_logger_file_h]), + ok = add_log_handlers([{error_logger_file_h, MainLog}]), + ok = case control_action(start_app, []) of + ok -> exit({got_success_but_expected_failure, + log_rotation_no_write_permission_dir_test}); + {badrpc, {'EXIT', + {error, {cannot_log_to_file, _, _}}}} -> ok + end, + + %% start application with logging to a subdirectory which + %% parent directory has no write permissions + ok = 
control_action(stop_app, []), + TmpTestDir = "/tmp/rabbit-tests/no-permission/test/log", + ok = application:set_env(rabbit, error_logger, {file, TmpTestDir}), + ok = add_log_handlers([{error_logger_file_h, MainLog}]), + ok = case control_action(start_app, []) of + ok -> exit({got_success_but_expected_failure, + log_rotatation_parent_dirs_test}); + {badrpc, + {'EXIT', + {error, {cannot_log_to_file, _, + {error, + {cannot_create_parent_dirs, _, eacces}}}}}} -> ok + end, + ok = set_permissions(TmpDir, 8#00700), + ok = set_permissions(TmpLog, 8#00600), + ok = delete_file(TmpLog), + ok = file:del_dir(TmpDir), + + %% start application with standard error_logger_file_h + %% handler not installed + ok = control_action(stop_app, []), + ok = application:set_env(rabbit, error_logger, {file, MainLog}), + ok = control_action(start_app, []), + + %% start application with standard sasl handler not installed + %% and rabbit main log handler installed correctly + ok = control_action(stop_app, []), + ok = delete_log_handlers([rabbit_sasl_report_file_h]), + ok = control_action(start_app, []), + passed. + +%% "rabbitmqctl rotate_logs" without additional parameters +%% shouldn't truncate files. +rotate_logs_without_suffix(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, rotate_logs_without_suffix1, [Config]). 
+ +rotate_logs_without_suffix1(_Config) -> + override_group_leader(), + + MainLog = rabbit:log_location(kernel), + SaslLog = rabbit:log_location(sasl), + Suffix = ".1", + file:delete(MainLog), + file:delete(SaslLog), + + %% Empty log-files should be created + ok = control_action(rotate_logs, []), + [true, true] = empty_files([MainLog, SaslLog]), + + %% Write something to log files and simulate external log rotation + ok = test_logs_working(MainLog, SaslLog), + ok = file:rename(MainLog, [MainLog, Suffix]), + ok = file:rename(SaslLog, [SaslLog, Suffix]), + + %% Create non-empty files + TestData = "test-data\n", + file:write_file(MainLog, TestData), + file:write_file(SaslLog, TestData), + + %% Nothing should be truncated - neither moved files which are still + %% opened by server, nor new log files that should be just reopened. + ok = control_action(rotate_logs, []), + [true, true, true, true] = + non_empty_files([MainLog, SaslLog, [MainLog, Suffix], + [SaslLog, Suffix]]), + + %% And log files should be re-opened - new log records should go to + %% new files. + ok = test_logs_working(MainLog, SaslLog), + true = (rabbit_file:file_size(MainLog) > length(TestData)), + true = (rabbit_file:file_size(SaslLog) > length(TestData)), + passed. + +override_group_leader() -> + %% Override group leader, otherwise SASL fake events are ignored by + %% the error_logger local to RabbitMQ. + {group_leader, Leader} = erlang:process_info(whereis(rabbit), group_leader), + erlang:group_leader(Leader, self()). + +empty_files(Files) -> + [case file:read_file_info(File) of + {ok, FInfo} -> FInfo#file_info.size == 0; + Error -> Error + end || File <- Files]. + +non_empty_files(Files) -> + [case EmptyFile of + {error, Reason} -> {error, Reason}; + _ -> not(EmptyFile) + end || EmptyFile <- empty_files(Files)]. 
+ +test_logs_working(MainLogFile, SaslLogFile) -> + ok = rabbit_log:error("Log a test message~n"), + ok = error_logger:error_report(crash_report, [fake_crash_report, ?MODULE]), + %% give the error loggers some time to catch up + timer:sleep(100), + [true, true] = non_empty_files([MainLogFile, SaslLogFile]), + ok. + +set_permissions(Path, Mode) -> + case file:read_file_info(Path) of + {ok, FInfo} -> file:write_file_info( + Path, + FInfo#file_info{mode=Mode}); + Error -> Error + end. + +clean_logs(Files, Suffix) -> + [begin + ok = delete_file(File), + ok = delete_file([File, Suffix]) + end || File <- Files], + ok. + +assert_ram_node() -> + case rabbit_mnesia:node_type() of + disc -> exit('not_ram_node'); + ram -> ok + end. + +assert_disc_node() -> + case rabbit_mnesia:node_type() of + disc -> ok; + ram -> exit('not_disc_node') + end. + +delete_file(File) -> + case file:delete(File) of + ok -> ok; + {error, enoent} -> ok; + Error -> Error + end. + +make_files_non_writable(Files) -> + [ok = file:write_file_info(File, #file_info{mode=8#444}) || + File <- Files], + ok. + +add_log_handlers(Handlers) -> + [ok = error_logger:add_report_handler(Handler, Args) || + {Handler, Args} <- Handlers], + ok. + +%% sasl_report_file_h returns [] during terminate +%% see: https://github.com/erlang/otp/blob/maint/lib/stdlib/src/error_logger_file_h.erl#L98 +%% +%% error_logger_file_h returns ok since OTP 18.1 +%% see: https://github.com/erlang/otp/blob/maint/lib/stdlib/src/error_logger_file_h.erl#L98 +delete_log_handlers(Handlers) -> + [ok_or_empty_list(error_logger:delete_report_handler(Handler)) + || Handler <- Handlers], + ok. + +ok_or_empty_list([]) -> + []; +ok_or_empty_list(ok) -> + ok. + +%% --------------------------------------------------------------------------- +%% Password hashing. 
+%% --------------------------------------------------------------------------- + +password_hashing(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, password_hashing1, [Config]). + +password_hashing1(_Config) -> + rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(), + application:set_env(rabbit, password_hashing_module, + rabbit_password_hashing_md5), + rabbit_password_hashing_md5 = rabbit_password:hashing_mod(), + application:set_env(rabbit, password_hashing_module, + rabbit_password_hashing_sha256), + rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(), + + rabbit_password_hashing_sha256 = + rabbit_password:hashing_mod(rabbit_password_hashing_sha256), + rabbit_password_hashing_md5 = + rabbit_password:hashing_mod(rabbit_password_hashing_md5), + rabbit_password_hashing_md5 = + rabbit_password:hashing_mod(undefined), + + rabbit_password_hashing_md5 = + rabbit_auth_backend_internal:hashing_module_for_user( + #internal_user{}), + rabbit_password_hashing_md5 = + rabbit_auth_backend_internal:hashing_module_for_user( + #internal_user{ + hashing_algorithm = undefined + }), + rabbit_password_hashing_md5 = + rabbit_auth_backend_internal:hashing_module_for_user( + #internal_user{ + hashing_algorithm = rabbit_password_hashing_md5 + }), + + rabbit_password_hashing_sha256 = + rabbit_auth_backend_internal:hashing_module_for_user( + #internal_user{ + hashing_algorithm = rabbit_password_hashing_sha256 + }), + + passed. + +change_password(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, change_password1, [Config]). 
+ +change_password1(_Config) -> + UserName = <<"test_user">>, + Password = <<"test_password">>, + case rabbit_auth_backend_internal:lookup_user(UserName) of + {ok, _} -> rabbit_auth_backend_internal:delete_user(UserName); + _ -> ok + end, + ok = application:set_env(rabbit, password_hashing_module, + rabbit_password_hashing_md5), + ok = rabbit_auth_backend_internal:add_user(UserName, Password), + {ok, #auth_user{username = UserName}} = + rabbit_auth_backend_internal:user_login_authentication( + UserName, [{password, Password}]), + ok = application:set_env(rabbit, password_hashing_module, + rabbit_password_hashing_sha256), + {ok, #auth_user{username = UserName}} = + rabbit_auth_backend_internal:user_login_authentication( + UserName, [{password, Password}]), + + NewPassword = <<"test_password1">>, + ok = rabbit_auth_backend_internal:change_password(UserName, NewPassword), + {ok, #auth_user{username = UserName}} = + rabbit_auth_backend_internal:user_login_authentication( + UserName, [{password, NewPassword}]), + + {refused, _, [UserName]} = + rabbit_auth_backend_internal:user_login_authentication( + UserName, [{password, Password}]), + passed. + +%% ------------------------------------------------------------------- +%% rabbitmqctl. +%% ------------------------------------------------------------------- + +list_operations_timeout_pass(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, list_operations_timeout_pass1, [Config]). 
+ +list_operations_timeout_pass1(Config) -> + %% create a few things so there is some useful information to list + {_Writer1, Limiter1, Ch1} = rabbit_ct_broker_helpers:test_channel(), + {_Writer2, Limiter2, Ch2} = rabbit_ct_broker_helpers:test_channel(), + + [Q, Q2] = [Queue || Name <- [<<"list_operations_timeout_pass-q1">>, + <<"list_operations_timeout_pass-q2">>], + {new, Queue = #amqqueue{}} <- + [rabbit_amqqueue:declare( + rabbit_misc:r(<<"/">>, queue, Name), + false, false, [], none)]], + + ok = rabbit_amqqueue:basic_consume( + Q, true, Ch1, Limiter1, false, 0, <<"ctag1">>, true, [], + undefined), + ok = rabbit_amqqueue:basic_consume( + Q2, true, Ch2, Limiter2, false, 0, <<"ctag2">>, true, [], + undefined), + + %% list users + ok = control_action(add_user, + ["list_operations_timeout_pass-user", + "list_operations_timeout_pass-password"]), + {error, {user_already_exists, _}} = + control_action(add_user, + ["list_operations_timeout_pass-user", + "list_operations_timeout_pass-password"]), + ok = control_action_t(list_users, [], ?TIMEOUT_LIST_OPS_PASS), + + %% list parameters + ok = dummy_runtime_parameters:register(), + ok = control_action(set_parameter, ["test", "good", "123"]), + ok = control_action_t(list_parameters, [], ?TIMEOUT_LIST_OPS_PASS), + ok = control_action(clear_parameter, ["test", "good"]), + dummy_runtime_parameters:unregister(), + + %% list vhosts + ok = control_action(add_vhost, ["/list_operations_timeout_pass-vhost"]), + {error, {vhost_already_exists, _}} = + control_action(add_vhost, ["/list_operations_timeout_pass-vhost"]), + ok = control_action_t(list_vhosts, [], ?TIMEOUT_LIST_OPS_PASS), + + %% list permissions + ok = control_action(set_permissions, + ["list_operations_timeout_pass-user", ".*", ".*", ".*"], + [{"-p", "/list_operations_timeout_pass-vhost"}]), + ok = control_action_t(list_permissions, [], + [{"-p", "/list_operations_timeout_pass-vhost"}], + ?TIMEOUT_LIST_OPS_PASS), + + %% list user permissions + ok = 
control_action_t(list_user_permissions, + ["list_operations_timeout_pass-user"], + ?TIMEOUT_LIST_OPS_PASS), + + %% list policies + ok = control_action_opts( + ["set_policy", "list_operations_timeout_pass-policy", ".*", + "{\"ha-mode\":\"all\"}"]), + ok = control_action_t(list_policies, [], ?TIMEOUT_LIST_OPS_PASS), + ok = control_action(clear_policy, ["list_operations_timeout_pass-policy"]), + + %% list queues + ok = info_action_t(list_queues, + rabbit_amqqueue:info_keys(), false, + ?TIMEOUT_LIST_OPS_PASS), + + %% list exchanges + ok = info_action_t(list_exchanges, + rabbit_exchange:info_keys(), true, + ?TIMEOUT_LIST_OPS_PASS), + + %% list bindings + ok = info_action_t(list_bindings, + rabbit_binding:info_keys(), true, + ?TIMEOUT_LIST_OPS_PASS), + + %% list connections + H = ?config(rmq_hostname, Config), + P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + {ok, C1} = gen_tcp:connect(H, P, [binary, {active, false}]), + gen_tcp:send(C1, <<"AMQP", 0, 0, 9, 1>>), + {ok, <<1,0,0>>} = gen_tcp:recv(C1, 3, 100), + + {ok, C2} = gen_tcp:connect(H, P, [binary, {active, false}]), + gen_tcp:send(C2, <<"AMQP", 0, 0, 9, 1>>), + {ok, <<1,0,0>>} = gen_tcp:recv(C2, 3, 100), + + ok = info_action_t( + list_connections, rabbit_networking:connection_info_keys(), false, + ?TIMEOUT_LIST_OPS_PASS), + + %% list consumers + ok = info_action_t( + list_consumers, rabbit_amqqueue:consumer_info_keys(), false, + ?TIMEOUT_LIST_OPS_PASS), + + %% list channels + ok = info_action_t( + list_channels, rabbit_channel:info_keys(), false, + ?TIMEOUT_LIST_OPS_PASS), + + %% do some cleaning up + ok = control_action(delete_user, ["list_operations_timeout_pass-user"]), + {error, {no_such_user, _}} = + control_action(delete_user, ["list_operations_timeout_pass-user"]), + + ok = control_action(delete_vhost, ["/list_operations_timeout_pass-vhost"]), + {error, {no_such_vhost, _}} = + control_action(delete_vhost, ["/list_operations_timeout_pass-vhost"]), + + %% close_connection + Conns = 
rabbit_ct_broker_helpers:get_connection_pids([C1, C2]), + [ok, ok] = [ok = control_action( + close_connection, [rabbit_misc:pid_to_string(ConnPid), "go away"]) + || ConnPid <- Conns], + + %% cleanup queues + [{ok, _} = rabbit_amqqueue:delete(QR, false, false) || QR <- [Q, Q2]], + + [begin + unlink(Chan), + ok = rabbit_channel:shutdown(Chan) + end || Chan <- [Ch1, Ch2]], + passed. + +user_management(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, user_management1, [Config]). + +user_management1(_Config) -> + + %% lots if stuff that should fail + {error, {no_such_user, _}} = + control_action(delete_user, + ["user_management-user"]), + {error, {no_such_user, _}} = + control_action(change_password, + ["user_management-user", "user_management-password"]), + {error, {no_such_vhost, _}} = + control_action(delete_vhost, + ["/user_management-vhost"]), + {error, {no_such_user, _}} = + control_action(set_permissions, + ["user_management-user", ".*", ".*", ".*"]), + {error, {no_such_user, _}} = + control_action(clear_permissions, + ["user_management-user"]), + {error, {no_such_user, _}} = + control_action(list_user_permissions, + ["user_management-user"]), + {error, {no_such_vhost, _}} = + control_action(list_permissions, [], + [{"-p", "/user_management-vhost"}]), + {error, {invalid_regexp, _, _}} = + control_action(set_permissions, + ["guest", "+foo", ".*", ".*"]), + {error, {no_such_user, _}} = + control_action(set_user_tags, + ["user_management-user", "bar"]), + + %% user creation + ok = control_action(add_user, + ["user_management-user", "user_management-password"]), + {error, {user_already_exists, _}} = + control_action(add_user, + ["user_management-user", "user_management-password"]), + ok = control_action(clear_password, + ["user_management-user"]), + ok = control_action(change_password, + ["user_management-user", "user_management-newpassword"]), + + TestTags = fun (Tags) -> + Args = ["user_management-user" | [atom_to_list(T) || T <- Tags]], + 
ok = control_action(set_user_tags, Args), + {ok, #internal_user{tags = Tags}} = + rabbit_auth_backend_internal:lookup_user( + <<"user_management-user">>), + ok = control_action(list_users, []) + end, + TestTags([foo, bar, baz]), + TestTags([administrator]), + TestTags([]), + + %% user authentication + ok = control_action(authenticate_user, + ["user_management-user", "user_management-newpassword"]), + {refused, _User, _Format, _Params} = + control_action(authenticate_user, + ["user_management-user", "user_management-password"]), + + %% vhost creation + ok = control_action(add_vhost, + ["/user_management-vhost"]), + {error, {vhost_already_exists, _}} = + control_action(add_vhost, + ["/user_management-vhost"]), + ok = control_action(list_vhosts, []), + + %% user/vhost mapping + ok = control_action(set_permissions, + ["user_management-user", ".*", ".*", ".*"], + [{"-p", "/user_management-vhost"}]), + ok = control_action(set_permissions, + ["user_management-user", ".*", ".*", ".*"], + [{"-p", "/user_management-vhost"}]), + ok = control_action(set_permissions, + ["user_management-user", ".*", ".*", ".*"], + [{"-p", "/user_management-vhost"}]), + ok = control_action(list_permissions, [], + [{"-p", "/user_management-vhost"}]), + ok = control_action(list_permissions, [], + [{"-p", "/user_management-vhost"}]), + ok = control_action(list_user_permissions, + ["user_management-user"]), + + %% user/vhost unmapping + ok = control_action(clear_permissions, + ["user_management-user"], [{"-p", "/user_management-vhost"}]), + ok = control_action(clear_permissions, + ["user_management-user"], [{"-p", "/user_management-vhost"}]), + + %% vhost deletion + ok = control_action(delete_vhost, + ["/user_management-vhost"]), + {error, {no_such_vhost, _}} = + control_action(delete_vhost, + ["/user_management-vhost"]), + + %% deleting a populated vhost + ok = control_action(add_vhost, + ["/user_management-vhost"]), + ok = control_action(set_permissions, + ["user_management-user", ".*", ".*", 
".*"], + [{"-p", "/user_management-vhost"}]), + {new, _} = rabbit_amqqueue:declare( + rabbit_misc:r(<<"/user_management-vhost">>, queue, + <<"user_management-vhost-queue">>), + true, false, [], none), + ok = control_action(delete_vhost, + ["/user_management-vhost"]), + + %% user deletion + ok = control_action(delete_user, + ["user_management-user"]), + {error, {no_such_user, _}} = + control_action(delete_user, + ["user_management-user"]), + + passed. + +runtime_parameters(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, runtime_parameters1, [Config]). + +runtime_parameters1(_Config) -> + dummy_runtime_parameters:register(), + Good = fun(L) -> ok = control_action(set_parameter, L) end, + Bad = fun(L) -> {error_string, _} = control_action(set_parameter, L) end, + + %% Acceptable for bijection + Good(["test", "good", "\"ignore\""]), + Good(["test", "good", "123"]), + Good(["test", "good", "true"]), + Good(["test", "good", "false"]), + Good(["test", "good", "null"]), + Good(["test", "good", "{\"key\": \"value\"}"]), + + %% Invalid json + Bad(["test", "good", "atom"]), + Bad(["test", "good", "{\"foo\": \"bar\""]), + Bad(["test", "good", "{foo: \"bar\"}"]), + + %% Test actual validation hook + Good(["test", "maybe", "\"good\""]), + Bad(["test", "maybe", "\"bad\""]), + Good(["test", "admin", "\"ignore\""]), %% ctl means 'user' -> none + + ok = control_action(list_parameters, []), + + ok = control_action(clear_parameter, ["test", "good"]), + ok = control_action(clear_parameter, ["test", "maybe"]), + ok = control_action(clear_parameter, ["test", "admin"]), + {error_string, _} = + control_action(clear_parameter, ["test", "neverexisted"]), + + %% We can delete for a component that no longer exists + Good(["test", "good", "\"ignore\""]), + dummy_runtime_parameters:unregister(), + ok = control_action(clear_parameter, ["test", "good"]), + passed. 
+ +policy_validation(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, policy_validation1, [Config]). + +policy_validation1(_Config) -> + PolicyName = "runtime_parameters-policy", + dummy_runtime_parameters:register_policy_validator(), + SetPol = fun (Key, Val) -> + control_action_opts( + ["set_policy", PolicyName, ".*", + rabbit_misc:format("{\"~s\":~p}", [Key, Val])]) + end, + OK = fun (Key, Val) -> + ok = SetPol(Key, Val), + true = does_policy_exist(PolicyName, + [{definition, [{list_to_binary(Key), Val}]}]) + end, + + OK("testeven", []), + OK("testeven", [1, 2]), + OK("testeven", [1, 2, 3, 4]), + OK("testpos", [2, 5, 5678]), + + {error_string, _} = SetPol("testpos", [-1, 0, 1]), + {error_string, _} = SetPol("testeven", [ 1, 2, 3]), + + ok = control_action(clear_policy, [PolicyName]), + dummy_runtime_parameters:unregister_policy_validator(), + passed. + +policy_opts_validation(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, policy_opts_validation1, [Config]). 
+ +policy_opts_validation1(_Config) -> + PolicyName = "policy_opts_validation-policy", + Set = fun (Extra) -> control_action_opts( + ["set_policy", PolicyName, + ".*", "{\"ha-mode\":\"all\"}" + | Extra]) end, + OK = fun (Extra, Props) -> + ok = Set(Extra), + true = does_policy_exist(PolicyName, Props) + end, + Fail = fun (Extra) -> + case Set(Extra) of + {error_string, _} -> ok; + no_command when Extra =:= ["--priority"] -> ok; + no_command when Extra =:= ["--apply-to"] -> ok; + {'EXIT', + {function_clause, + [{rabbit_control_main,action, _, _} | _]}} + when Extra =:= ["--offline"] -> ok + end + end, + + OK ([], [{priority, 0}, {'apply-to', <<"all">>}]), + + OK (["--priority", "0"], [{priority, 0}]), + OK (["--priority", "3"], [{priority, 3}]), + Fail(["--priority", "banana"]), + Fail(["--priority"]), + + OK (["--apply-to", "all"], [{'apply-to', <<"all">>}]), + OK (["--apply-to", "queues"], [{'apply-to', <<"queues">>}]), + Fail(["--apply-to", "bananas"]), + Fail(["--apply-to"]), + + OK (["--priority", "3", "--apply-to", "queues"], [{priority, 3}, {'apply-to', <<"queues">>}]), + Fail(["--priority", "banana", "--apply-to", "queues"]), + Fail(["--priority", "3", "--apply-to", "bananas"]), + + Fail(["--offline"]), + + ok = control_action(clear_policy, [PolicyName]), + passed. + +ha_policy_validation(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, ha_policy_validation1, [Config]). 
+ +ha_policy_validation1(_Config) -> + PolicyName = "ha_policy_validation-policy", + Set = fun (JSON) -> control_action_opts( + ["set_policy", PolicyName, + ".*", JSON]) end, + OK = fun (JSON, Def) -> + ok = Set(JSON), + true = does_policy_exist(PolicyName, [{definition, Def}]) + end, + Fail = fun (JSON) -> {error_string, _} = Set(JSON) end, + + OK ("{\"ha-mode\":\"all\"}", [{<<"ha-mode">>, <<"all">>}]), + Fail("{\"ha-mode\":\"made_up\"}"), + + Fail("{\"ha-mode\":\"nodes\"}"), + Fail("{\"ha-mode\":\"nodes\",\"ha-params\":2}"), + Fail("{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",2]}"), + OK ("{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",\"b\"]}", + [{<<"ha-mode">>, <<"nodes">>}, {<<"ha-params">>, [<<"a">>, <<"b">>]}]), + Fail("{\"ha-params\":[\"a\",\"b\"]}"), + + Fail("{\"ha-mode\":\"exactly\"}"), + Fail("{\"ha-mode\":\"exactly\",\"ha-params\":[\"a\",\"b\"]}"), + OK ("{\"ha-mode\":\"exactly\",\"ha-params\":2}", + [{<<"ha-mode">>, <<"exactly">>}, {<<"ha-params">>, 2}]), + Fail("{\"ha-params\":2}"), + + OK ("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"manual\"}", + [{<<"ha-mode">>, <<"all">>}, {<<"ha-sync-mode">>, <<"manual">>}]), + OK ("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"automatic\"}", + [{<<"ha-mode">>, <<"all">>}, {<<"ha-sync-mode">>, <<"automatic">>}]), + Fail("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"made_up\"}"), + Fail("{\"ha-sync-mode\":\"manual\"}"), + Fail("{\"ha-sync-mode\":\"automatic\"}"), + + ok = control_action(clear_policy, [PolicyName]), + passed. + +queue_master_location_policy_validation(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, queue_master_location_policy_validation1, [Config]). 
+ +queue_master_location_policy_validation1(_Config) -> + PolicyName = "queue_master_location_policy_validation-policy", + Set = fun (JSON) -> + control_action_opts( + ["set_policy", PolicyName, ".*", JSON]) + end, + OK = fun (JSON, Def) -> + ok = Set(JSON), + true = does_policy_exist(PolicyName, [{definition, Def}]) + end, + Fail = fun (JSON) -> {error_string, _} = Set(JSON) end, + + OK ("{\"queue-master-locator\":\"min-masters\"}", + [{<<"queue-master-locator">>, <<"min-masters">>}]), + OK ("{\"queue-master-locator\":\"client-local\"}", + [{<<"queue-master-locator">>, <<"client-local">>}]), + OK ("{\"queue-master-locator\":\"random\"}", + [{<<"queue-master-locator">>, <<"random">>}]), + Fail("{\"queue-master-locator\":\"made_up\"}"), + + ok = control_action(clear_policy, [PolicyName]), + passed. + +queue_modes_policy_validation(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, queue_modes_policy_validation1, [Config]). + +queue_modes_policy_validation1(_Config) -> + PolicyName = "queue_modes_policy_validation-policy", + Set = fun (JSON) -> + control_action_opts( + ["set_policy", PolicyName, ".*", JSON]) + end, + OK = fun (JSON, Def) -> + ok = Set(JSON), + true = does_policy_exist(PolicyName, [{definition, Def}]) + end, + Fail = fun (JSON) -> {error_string, _} = Set(JSON) end, + + OK ("{\"queue-mode\":\"lazy\"}", + [{<<"queue-mode">>, <<"lazy">>}]), + OK ("{\"queue-mode\":\"default\"}", + [{<<"queue-mode">>, <<"default">>}]), + Fail("{\"queue-mode\":\"wrong\"}"), + + ok = control_action(clear_policy, [PolicyName]), + passed. + +vhost_removed_while_updating_policy(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, vhost_removed_while_updating_policy1, [Config]). 
+ +vhost_removed_while_updating_policy1(_Config) -> + VHost = "/vhost_removed_while_updating_policy-vhost", + PolicyName = "vhost_removed_while_updating_policy-policy", + + ok = control_action(add_vhost, [VHost]), + ok = control_action_opts( + ["set_policy", "-p", VHost, PolicyName, ".*", "{\"ha-mode\":\"all\"}"]), + true = does_policy_exist(PolicyName, []), + + %% Removing the vhost triggers the deletion of the policy. Once + %% the policy and the vhost are actually removed, RabbitMQ calls + %% update_policies() which lists policies on the given vhost. This + %% obviously fails because the vhost is gone, but the call should + %% still succeed. + ok = control_action(delete_vhost, [VHost]), + false = does_policy_exist(PolicyName, []), + + passed. + +does_policy_exist(PolicyName, Props) -> + PolicyNameBin = list_to_binary(PolicyName), + Policies = lists:filter( + fun(Policy) -> + lists:member({name, PolicyNameBin}, Policy) + end, rabbit_policy:list()), + case Policies of + [Policy] -> check_policy_props(Policy, Props); + [] -> false; + _ -> false + end. + +check_policy_props(Policy, [Prop | Rest]) -> + case lists:member(Prop, Policy) of + true -> check_policy_props(Policy, Rest); + false -> false + end; +check_policy_props(_Policy, []) -> + true. + +server_status(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, server_status1, [Config]). 
+ +server_status1(Config) -> + %% create a few things so there is some useful information to list + {_Writer, Limiter, Ch} = rabbit_ct_broker_helpers:test_channel(), + [Q, Q2] = [Queue || {Name, Owner} <- [{<<"server_status-q1">>, none}, + {<<"server_status-q2">>, self()}], + {new, Queue = #amqqueue{}} <- + [rabbit_amqqueue:declare( + rabbit_misc:r(<<"/">>, queue, Name), + false, false, [], Owner)]], + ok = rabbit_amqqueue:basic_consume( + Q, true, Ch, Limiter, false, 0, <<"ctag">>, true, [], undefined), + + %% list queues + ok = info_action(list_queues, + rabbit_amqqueue:info_keys(), true), + + %% as we have no way to collect output of + %% info_action/3 call, the only way we + %% can test individual queueinfoitems is by directly calling + %% rabbit_amqqueue:info/2 + [{exclusive, false}] = rabbit_amqqueue:info(Q, [exclusive]), + [{exclusive, true}] = rabbit_amqqueue:info(Q2, [exclusive]), + + %% list exchanges + ok = info_action(list_exchanges, + rabbit_exchange:info_keys(), true), + + %% list bindings + ok = info_action(list_bindings, + rabbit_binding:info_keys(), true), + %% misc binding listing APIs + [_|_] = rabbit_binding:list_for_source( + rabbit_misc:r(<<"/">>, exchange, <<"">>)), + [_] = rabbit_binding:list_for_destination( + rabbit_misc:r(<<"/">>, queue, <<"server_status-q1">>)), + [_] = rabbit_binding:list_for_source_and_destination( + rabbit_misc:r(<<"/">>, exchange, <<"">>), + rabbit_misc:r(<<"/">>, queue, <<"server_status-q1">>)), + + %% list connections + H = ?config(rmq_hostname, Config), + P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + {ok, C} = gen_tcp:connect(H, P, []), + gen_tcp:send(C, <<"AMQP", 0, 0, 9, 1>>), + timer:sleep(100), + ok = info_action(list_connections, + rabbit_networking:connection_info_keys(), false), + %% close_connection + [ConnPid] = rabbit_ct_broker_helpers:get_connection_pids([C]), + ok = control_action(close_connection, + [rabbit_misc:pid_to_string(ConnPid), "go away"]), + + %% list channels + ok 
= info_action(list_channels, rabbit_channel:info_keys(), false), + + %% list consumers + ok = control_action(list_consumers, []), + + %% set vm memory high watermark + HWM = vm_memory_monitor:get_vm_memory_high_watermark(), + ok = control_action(set_vm_memory_high_watermark, ["1"]), + ok = control_action(set_vm_memory_high_watermark, ["1.0"]), + %% this will trigger an alarm + ok = control_action(set_vm_memory_high_watermark, ["0.0"]), + %% reset + ok = control_action(set_vm_memory_high_watermark, [float_to_list(HWM)]), + + %% eval + {error_string, _} = control_action(eval, ["\""]), + {error_string, _} = control_action(eval, ["a("]), + ok = control_action(eval, ["a."]), + + %% cleanup + [{ok, _} = rabbit_amqqueue:delete(QR, false, false) || QR <- [Q, Q2]], + + unlink(Ch), + ok = rabbit_channel:shutdown(Ch), + + passed. + +amqp_connection_refusal(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, amqp_connection_refusal1, [Config]). + +amqp_connection_refusal1(Config) -> + H = ?config(rmq_hostname, Config), + P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + [passed = test_amqp_connection_refusal(H, P, V) || + V <- [<<"AMQP",9,9,9,9>>, <<"AMQP",0,1,0,0>>, <<"XXXX",0,0,9,1>>]], + passed. + +test_amqp_connection_refusal(H, P, Header) -> + {ok, C} = gen_tcp:connect(H, P, [binary, {active, false}]), + ok = gen_tcp:send(C, Header), + {ok, <<"AMQP",0,0,9,1>>} = gen_tcp:recv(C, 8, 100), + ok = gen_tcp:close(C), + passed. 
+ +list_consumers_sanity_check(Config) -> + A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Chan = rabbit_ct_client_helpers:open_channel(Config, A), + %% this queue is not cleaned up because the entire node is + %% reset between tests + QName = <<"list_consumers_q">>, + #'queue.declare_ok'{} = amqp_channel:call(Chan, #'queue.declare'{queue = QName}), + + %% No consumers even if we have some queues + [] = rabbitmqctl_list_consumers(Config, A), + + %% Several consumers on single channel should be correctly reported + #'basic.consume_ok'{consumer_tag = CTag1} = amqp_channel:call(Chan, #'basic.consume'{queue = QName}), + #'basic.consume_ok'{consumer_tag = CTag2} = amqp_channel:call(Chan, #'basic.consume'{queue = QName}), + true = (lists:sort([CTag1, CTag2]) =:= + lists:sort(rabbitmqctl_list_consumers(Config, A))), + + %% `rabbitmqctl report` shares some code with `list_consumers`, so + %% check that it also reports both channels + {ok, ReportStdOut} = rabbit_ct_broker_helpers:rabbitmqctl(Config, A, + ["list_consumers"]), + ReportLines = re:split(ReportStdOut, <<"\n">>, [trim]), + ReportCTags = [lists:nth(3, re:split(Row, <<"\t">>)) || <<"list_consumers_q", _/binary>> = Row <- ReportLines], + true = (lists:sort([CTag1, CTag2]) =:= + lists:sort(ReportCTags)). + +rabbitmqctl_list_consumers(Config, Node) -> + {ok, StdOut} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Node, + ["list_consumers"]), + [<<"Listing consumers", _/binary>> | ConsumerRows] = re:split(StdOut, <<"\n">>, [trim]), + CTags = [ lists:nth(3, re:split(Row, <<"\t">>)) || Row <- ConsumerRows ], + CTags. 
+ +list_queues_online_and_offline(Config) -> + [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + ACh = rabbit_ct_client_helpers:open_channel(Config, A), + %% Node B will be stopped + BCh = rabbit_ct_client_helpers:open_channel(Config, B), + #'queue.declare_ok'{} = amqp_channel:call(ACh, #'queue.declare'{queue = <<"q_a_1">>, durable = true}), + #'queue.declare_ok'{} = amqp_channel:call(ACh, #'queue.declare'{queue = <<"q_a_2">>, durable = true}), + #'queue.declare_ok'{} = amqp_channel:call(BCh, #'queue.declare'{queue = <<"q_b_1">>, durable = true}), + #'queue.declare_ok'{} = amqp_channel:call(BCh, #'queue.declare'{queue = <<"q_b_2">>, durable = true}), + + rabbit_ct_broker_helpers:rabbitmqctl(Config, B, ["stop"]), + + GotUp = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A, + ["list_queues", "--online", "name"])), + ExpectUp = [[<<"q_a_1">>], [<<"q_a_2">>]], + ExpectUp = GotUp, + + GotDown = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A, + ["list_queues", "--offline", "name"])), + ExpectDown = [[<<"q_b_1">>], [<<"q_b_2">>]], + ExpectDown = GotDown, + + GotAll = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A, + ["list_queues", "name"])), + ExpectAll = ExpectUp ++ ExpectDown, + ExpectAll = GotAll, + + ok. + +%% ------------------------------------------------------------------- +%% Statistics. +%% ------------------------------------------------------------------- + +channel_statistics(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, channel_statistics1, [Config]). + +channel_statistics1(_Config) -> + application:set_env(rabbit, collect_statistics, fine), + + %% ATM this just tests the queue / exchange stats in channels. That's + %% by far the most complex code though. 
+ + %% Set up a channel and queue + {_Writer, Ch} = test_spawn(), + rabbit_channel:do(Ch, #'queue.declare'{}), + QName = receive #'queue.declare_ok'{queue = Q0} -> Q0 + after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok) + end, + QRes = rabbit_misc:r(<<"/">>, queue, QName), + X = rabbit_misc:r(<<"/">>, exchange, <<"">>), + + dummy_event_receiver:start(self(), [node()], [channel_stats]), + + %% Check stats empty + Event = test_ch_statistics_receive_event(Ch, fun (_) -> true end), + [] = proplists:get_value(channel_queue_stats, Event), + [] = proplists:get_value(channel_exchange_stats, Event), + [] = proplists:get_value(channel_queue_exchange_stats, Event), + + %% Publish and get a message + rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>, + routing_key = QName}, + rabbit_basic:build_content(#'P_basic'{}, <<"">>)), + rabbit_channel:do(Ch, #'basic.get'{queue = QName}), + + %% Check the stats reflect that + Event2 = test_ch_statistics_receive_event( + Ch, + fun (E) -> + length(proplists:get_value( + channel_queue_exchange_stats, E)) > 0 + end), + [{QRes, [{get,1}]}] = proplists:get_value(channel_queue_stats, Event2), + [{X,[{publish,1}]}] = proplists:get_value(channel_exchange_stats, Event2), + [{{QRes,X},[{publish,1}]}] = + proplists:get_value(channel_queue_exchange_stats, Event2), + + %% Check the stats remove stuff on queue deletion + rabbit_channel:do(Ch, #'queue.delete'{queue = QName}), + Event3 = test_ch_statistics_receive_event( + Ch, + fun (E) -> + length(proplists:get_value( + channel_queue_exchange_stats, E)) == 0 + end), + + [] = proplists:get_value(channel_queue_stats, Event3), + [{X,[{publish,1}]}] = proplists:get_value(channel_exchange_stats, Event3), + [] = proplists:get_value(channel_queue_exchange_stats, Event3), + + rabbit_channel:shutdown(Ch), + dummy_event_receiver:stop(), + passed. + +test_ch_statistics_receive_event(Ch, Matcher) -> + rabbit_channel:flush(Ch), + Ch ! emit_stats, + test_ch_statistics_receive_event1(Ch, Matcher). 
+ +test_ch_statistics_receive_event1(Ch, Matcher) -> + receive #event{type = channel_stats, props = Props} -> + case Matcher(Props) of + true -> Props; + _ -> test_ch_statistics_receive_event1(Ch, Matcher) + end + after ?TIMEOUT -> throw(failed_to_receive_event) + end. + +head_message_timestamp_statistics(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, head_message_timestamp1, [Config]). + +head_message_timestamp1(_Config) -> + %% Can't find a way to receive the ack here so can't test pending acks status + + application:set_env(rabbit, collect_statistics, fine), + + %% Set up a channel and queue + {_Writer, Ch} = test_spawn(), + rabbit_channel:do(Ch, #'queue.declare'{}), + QName = receive #'queue.declare_ok'{queue = Q0} -> Q0 + after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok) + end, + QRes = rabbit_misc:r(<<"/">>, queue, QName), + + {ok, Q1} = rabbit_amqqueue:lookup(QRes), + QPid = Q1#amqqueue.pid, + + %% Set up event receiver for queue + dummy_event_receiver:start(self(), [node()], [queue_stats]), + + %% Check timestamp is empty when queue is empty + Event1 = test_queue_statistics_receive_event(QPid, fun (E) -> proplists:get_value(name, E) == QRes end), + '' = proplists:get_value(head_message_timestamp, Event1), + + %% Publish two messages and check timestamp is that of first message + rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>, + routing_key = QName}, + rabbit_basic:build_content(#'P_basic'{timestamp = 1}, <<"">>)), + rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>, + routing_key = QName}, + rabbit_basic:build_content(#'P_basic'{timestamp = 2}, <<"">>)), + Event2 = test_queue_statistics_receive_event(QPid, fun (E) -> proplists:get_value(name, E) == QRes end), + 1 = proplists:get_value(head_message_timestamp, Event2), + + %% Get first message and check timestamp is that of second message + rabbit_channel:do(Ch, #'basic.get'{queue = QName, no_ack = true}), + Event3 = 
test_queue_statistics_receive_event(QPid, fun (E) -> proplists:get_value(name, E) == QRes end), + 2 = proplists:get_value(head_message_timestamp, Event3), + + %% Get second message and check timestamp is empty again + rabbit_channel:do(Ch, #'basic.get'{queue = QName, no_ack = true}), + Event4 = test_queue_statistics_receive_event(QPid, fun (E) -> proplists:get_value(name, E) == QRes end), + '' = proplists:get_value(head_message_timestamp, Event4), + + %% Teardown + rabbit_channel:do(Ch, #'queue.delete'{queue = QName}), + rabbit_channel:shutdown(Ch), + dummy_event_receiver:stop(), + + passed. + +test_queue_statistics_receive_event(Q, Matcher) -> + %% Q ! emit_stats, + test_queue_statistics_receive_event1(Q, Matcher). + +test_queue_statistics_receive_event1(Q, Matcher) -> + receive #event{type = queue_stats, props = Props} -> + case Matcher(Props) of + true -> Props; + _ -> test_queue_statistics_receive_event1(Q, Matcher) + end + after ?TIMEOUT -> throw(failed_to_receive_event) + end. + +test_spawn() -> + {Writer, _Limiter, Ch} = rabbit_ct_broker_helpers:test_channel(), + ok = rabbit_channel:do(Ch, #'channel.open'{}), + receive #'channel.open_ok'{} -> ok + after ?TIMEOUT -> throw(failed_to_receive_channel_open_ok) + end, + {Writer, Ch}. + +test_spawn(Node) -> + rpc:call(Node, ?MODULE, test_spawn_remote, []). + +%% Spawn an arbitrary long lived process, so we don't end up linking +%% the channel to the short-lived process (RPC, here) spun up by the +%% RPC server. +test_spawn_remote() -> + RPC = self(), + spawn(fun () -> + {Writer, Ch} = test_spawn(), + RPC ! {Writer, Ch}, + link(Ch), + receive + _ -> ok + end + end), + receive Res -> Res + after ?TIMEOUT -> throw(failed_to_receive_result) + end. + +%% ------------------------------------------------------------------- +%% Topic matching. 
+%% ------------------------------------------------------------------- + +topic_matching(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, topic_matching1, [Config]). + +topic_matching1(_Config) -> + XName = #resource{virtual_host = <<"/">>, + kind = exchange, + name = <<"topic_matching-exchange">>}, + X0 = #exchange{name = XName, type = topic, durable = false, + auto_delete = false, arguments = []}, + X = rabbit_exchange_decorator:set(X0), + %% create + rabbit_exchange_type_topic:validate(X), + exchange_op_callback(X, create, []), + + %% add some bindings + Bindings = [#binding{source = XName, + key = list_to_binary(Key), + destination = #resource{virtual_host = <<"/">>, + kind = queue, + name = list_to_binary(Q)}, + args = Args} || + {Key, Q, Args} <- [{"a.b.c", "t1", []}, + {"a.*.c", "t2", []}, + {"a.#.b", "t3", []}, + {"a.b.b.c", "t4", []}, + {"#", "t5", []}, + {"#.#", "t6", []}, + {"#.b", "t7", []}, + {"*.*", "t8", []}, + {"a.*", "t9", []}, + {"*.b.c", "t10", []}, + {"a.#", "t11", []}, + {"a.#.#", "t12", []}, + {"b.b.c", "t13", []}, + {"a.b.b", "t14", []}, + {"a.b", "t15", []}, + {"b.c", "t16", []}, + {"", "t17", []}, + {"*.*.*", "t18", []}, + {"vodka.martini", "t19", []}, + {"a.b.c", "t20", []}, + {"*.#", "t21", []}, + {"#.*.#", "t22", []}, + {"*.#.#", "t23", []}, + {"#.#.#", "t24", []}, + {"*", "t25", []}, + {"#.b.#", "t26", []}, + {"args-test", "t27", + [{<<"foo">>, longstr, <<"bar">>}]}, + {"args-test", "t27", %% Note aliasing + [{<<"foo">>, longstr, <<"baz">>}]}]], + lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end, + Bindings), + + %% test some matches + test_topic_expect_match( + X, [{"a.b.c", ["t1", "t2", "t5", "t6", "t10", "t11", "t12", + "t18", "t20", "t21", "t22", "t23", "t24", + "t26"]}, + {"a.b", ["t3", "t5", "t6", "t7", "t8", "t9", "t11", + "t12", "t15", "t21", "t22", "t23", "t24", + "t26"]}, + {"a.b.b", ["t3", "t5", "t6", "t7", "t11", "t12", "t14", + "t18", "t21", "t22", "t23", "t24", "t26"]}, + {"", 
["t5", "t6", "t17", "t24"]}, + {"b.c.c", ["t5", "t6", "t18", "t21", "t22", "t23", + "t24", "t26"]}, + {"a.a.a.a.a", ["t5", "t6", "t11", "t12", "t21", "t22", + "t23", "t24"]}, + {"vodka.gin", ["t5", "t6", "t8", "t21", "t22", "t23", + "t24"]}, + {"vodka.martini", ["t5", "t6", "t8", "t19", "t21", "t22", "t23", + "t24"]}, + {"b.b.c", ["t5", "t6", "t10", "t13", "t18", "t21", + "t22", "t23", "t24", "t26"]}, + {"nothing.here.at.all", ["t5", "t6", "t21", "t22", "t23", "t24"]}, + {"oneword", ["t5", "t6", "t21", "t22", "t23", "t24", + "t25"]}, + {"args-test", ["t5", "t6", "t21", "t22", "t23", "t24", + "t25", "t27"]}]), + %% remove some bindings + RemovedBindings = [lists:nth(1, Bindings), lists:nth(5, Bindings), + lists:nth(11, Bindings), lists:nth(19, Bindings), + lists:nth(21, Bindings), lists:nth(28, Bindings)], + exchange_op_callback(X, remove_bindings, [RemovedBindings]), + RemainingBindings = ordsets:to_list( + ordsets:subtract(ordsets:from_list(Bindings), + ordsets:from_list(RemovedBindings))), + + %% test some matches + test_topic_expect_match( + X, + [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22", + "t23", "t24", "t26"]}, + {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15", + "t22", "t23", "t24", "t26"]}, + {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22", + "t23", "t24", "t26"]}, + {"", ["t6", "t17", "t24"]}, + {"b.c.c", ["t6", "t18", "t22", "t23", "t24", "t26"]}, + {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]}, + {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]}, + {"vodka.martini", ["t6", "t8", "t22", "t23", "t24"]}, + {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23", + "t24", "t26"]}, + {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]}, + {"oneword", ["t6", "t22", "t23", "t24", "t25"]}, + {"args-test", ["t6", "t22", "t23", "t24", "t25", "t27"]}]), + + %% remove the entire exchange + exchange_op_callback(X, delete, [RemainingBindings]), + %% none should match now + test_topic_expect_match(X, [{"a.b.c", []}, {"b.b.c", []}, {"", []}]), + 
passed. + +exchange_op_callback(X, Fun, Args) -> + rabbit_misc:execute_mnesia_transaction( + fun () -> rabbit_exchange:callback(X, Fun, transaction, [X] ++ Args) end), + rabbit_exchange:callback(X, Fun, none, [X] ++ Args). + +test_topic_expect_match(X, List) -> + lists:foreach( + fun ({Key, Expected}) -> + BinKey = list_to_binary(Key), + Message = rabbit_basic:message(X#exchange.name, BinKey, + #'P_basic'{}, <<>>), + Res = rabbit_exchange_type_topic:route( + X, #delivery{mandatory = false, + sender = self(), + message = Message}), + ExpectedRes = lists:map( + fun (Q) -> #resource{virtual_host = <<"/">>, + kind = queue, + name = list_to_binary(Q)} + end, Expected), + true = (lists:usort(ExpectedRes) =:= lists:usort(Res)) + end, List). + +%% --------------------------------------------------------------------------- +%% Unordered tests (originally from rabbit_tests.erl). +%% --------------------------------------------------------------------------- + +confirms(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, confirms1, [Config]). 
+ +confirms1(_Config) -> + {_Writer, Ch} = test_spawn(), + DeclareBindDurableQueue = + fun() -> + rabbit_channel:do(Ch, #'queue.declare'{durable = true}), + receive #'queue.declare_ok'{queue = Q0} -> + rabbit_channel:do(Ch, #'queue.bind'{ + queue = Q0, + exchange = <<"amq.direct">>, + routing_key = "confirms-magic" }), + receive #'queue.bind_ok'{} -> Q0 + after ?TIMEOUT -> throw(failed_to_bind_queue) + end + after ?TIMEOUT -> throw(failed_to_declare_queue) + end + end, + %% Declare and bind two queues + QName1 = DeclareBindDurableQueue(), + QName2 = DeclareBindDurableQueue(), + %% Get the first one's pid (we'll crash it later) + {ok, Q1} = rabbit_amqqueue:lookup(rabbit_misc:r(<<"/">>, queue, QName1)), + QPid1 = Q1#amqqueue.pid, + %% Enable confirms + rabbit_channel:do(Ch, #'confirm.select'{}), + receive + #'confirm.select_ok'{} -> ok + after ?TIMEOUT -> throw(failed_to_enable_confirms) + end, + %% Publish a message + rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"amq.direct">>, + routing_key = "confirms-magic" + }, + rabbit_basic:build_content( + #'P_basic'{delivery_mode = 2}, <<"">>)), + %% We must not kill the queue before the channel has processed the + %% 'publish'. + ok = rabbit_channel:flush(Ch), + %% Crash the queue + QPid1 ! boom, + %% Wait for a nack + receive + #'basic.nack'{} -> ok; + #'basic.ack'{} -> throw(received_ack_instead_of_nack) + after ?TIMEOUT-> throw(did_not_receive_nack) + end, + receive + #'basic.ack'{} -> throw(received_ack_when_none_expected) + after 1000 -> ok + end, + %% Cleanup + rabbit_channel:do(Ch, #'queue.delete'{queue = QName2}), + receive + #'queue.delete_ok'{} -> ok + after ?TIMEOUT -> throw(failed_to_cleanup_queue) + end, + unlink(Ch), + ok = rabbit_channel:shutdown(Ch), + + passed. + +gen_server2_with_state(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, gen_server2_with_state1, [Config]). 
+ +gen_server2_with_state1(_Config) -> + fhc_state = gen_server2:with_state(file_handle_cache, + fun (S) -> element(1, S) end), + passed. + +mcall(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, mcall1, [Config]). + +mcall1(_Config) -> + P1 = spawn(fun gs2_test_listener/0), + register(foo, P1), + global:register_name(gfoo, P1), + + P2 = spawn(fun() -> exit(bang) end), + %% ensure P2 is dead (ignore the race setting up the monitor) + await_exit(P2), + + P3 = spawn(fun gs2_test_crasher/0), + + %% since P2 crashes almost immediately and P3 after receiving its first + %% message, we have to spawn a few more processes to handle the additional + %% cases we're interested in here + register(baz, spawn(fun gs2_test_crasher/0)), + register(bog, spawn(fun gs2_test_crasher/0)), + global:register_name(gbaz, spawn(fun gs2_test_crasher/0)), + + NoNode = rabbit_nodes:make("nonode"), + + Targets = + %% pids + [P1, P2, P3] + ++ + %% registered names + [foo, bar, baz] + ++ + %% {Name, Node} pairs + [{foo, node()}, {bar, node()}, {bog, node()}, {foo, NoNode}] + ++ + %% {global, Name} + [{global, gfoo}, {global, gbar}, {global, gbaz}], + + GoodResults = [{D, goodbye} || D <- [P1, foo, + {foo, node()}, + {global, gfoo}]], + + BadResults = [{P2, noproc}, % died before use + {P3, boom}, % died on first use + {bar, noproc}, % never registered + {baz, boom}, % died on first use + {{bar, node()}, noproc}, % never registered + {{bog, node()}, boom}, % died on first use + {{foo, NoNode}, nodedown}, % invalid node + {{global, gbar}, noproc}, % never registered globally + {{global, gbaz}, boom}], % died on first use + + {Replies, Errors} = gen_server2:mcall([{T, hello} || T <- Targets]), + true = lists:sort(Replies) == lists:sort(GoodResults), + true = lists:sort(Errors) == lists:sort(BadResults), + + %% cleanup (ignore the race setting up the monitor) + P1 ! stop, + await_exit(P1), + passed. 
+ +await_exit(Pid) -> + MRef = erlang:monitor(process, Pid), + receive + {'DOWN', MRef, _, _, _} -> ok + end. + +gs2_test_crasher() -> + receive + {'$gen_call', _From, hello} -> exit(boom) + end. + +gs2_test_listener() -> + receive + {'$gen_call', From, hello} -> + gen_server2:reply(From, goodbye), + gs2_test_listener(); + stop -> + ok + end. + +configurable_server_properties(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, configurable_server_properties1, [Config]). + +configurable_server_properties1(_Config) -> + %% List of the names of the built-in properties do we expect to find + BuiltInPropNames = [<<"product">>, <<"version">>, <<"platform">>, + <<"copyright">>, <<"information">>], + + Protocol = rabbit_framing_amqp_0_9_1, + + %% Verify that the built-in properties are initially present + ActualPropNames = [Key || {Key, longstr, _} <- + rabbit_reader:server_properties(Protocol)], + true = lists:all(fun (X) -> lists:member(X, ActualPropNames) end, + BuiltInPropNames), + + %% Get the initial server properties configured in the environment + {ok, ServerProperties} = application:get_env(rabbit, server_properties), + + %% Helper functions + ConsProp = fun (X) -> application:set_env(rabbit, + server_properties, + [X | ServerProperties]) end, + IsPropPresent = + fun (X) -> + lists:member(X, rabbit_reader:server_properties(Protocol)) + end, + + %% Add a wholly new property of the simplified {KeyAtom, StringValue} form + NewSimplifiedProperty = {NewHareKey, NewHareVal} = {hare, "soup"}, + ConsProp(NewSimplifiedProperty), + %% Do we find hare soup, appropriately formatted in the generated properties? 
+ ExpectedHareImage = {list_to_binary(atom_to_list(NewHareKey)), + longstr, + list_to_binary(NewHareVal)}, + true = IsPropPresent(ExpectedHareImage), + + %% Add a wholly new property of the {BinaryKey, Type, Value} form + %% and check for it + NewProperty = {<<"new-bin-key">>, signedint, -1}, + ConsProp(NewProperty), + %% Do we find the new property? + true = IsPropPresent(NewProperty), + + %% Add a property that clobbers a built-in, and verify correct clobbering + {NewVerKey, NewVerVal} = NewVersion = {version, "X.Y.Z."}, + {BinNewVerKey, BinNewVerVal} = {list_to_binary(atom_to_list(NewVerKey)), + list_to_binary(NewVerVal)}, + ConsProp(NewVersion), + ClobberedServerProps = rabbit_reader:server_properties(Protocol), + %% Is the clobbering insert present? + true = IsPropPresent({BinNewVerKey, longstr, BinNewVerVal}), + %% Is the clobbering insert the only thing with the clobbering key? + [{BinNewVerKey, longstr, BinNewVerVal}] = + [E || {K, longstr, _V} = E <- ClobberedServerProps, K =:= BinNewVerKey], + + application:set_env(rabbit, server_properties, ServerProperties), + passed. + +memory_high_watermark(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, memory_high_watermark1, [Config]). + +memory_high_watermark1(_Config) -> + %% set vm memory high watermark + HWM = vm_memory_monitor:get_vm_memory_high_watermark(), + %% this will trigger an alarm + ok = control_action(set_vm_memory_high_watermark, + ["absolute", "2000"]), + [{{resource_limit,memory,_},[]}] = rabbit_alarm:get_alarms(), + %% reset + ok = control_action(set_vm_memory_high_watermark, + [float_to_list(HWM)]), + + passed. + +set_disk_free_limit_command(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, set_disk_free_limit_command1, [Config]). 
+ +set_disk_free_limit_command1(_Config) -> + ok = control_action(set_disk_free_limit, + ["2000kiB"]), + 2048000 = rabbit_disk_monitor:get_disk_free_limit(), + ok = control_action(set_disk_free_limit, + ["mem_relative", "1.1"]), + ExpectedLimit = 1.1 * vm_memory_monitor:get_total_memory(), + % Total memory is unstable, so checking order + true = ExpectedLimit/rabbit_disk_monitor:get_disk_free_limit() < 1.2, + true = ExpectedLimit/rabbit_disk_monitor:get_disk_free_limit() > 0.98, + ok = control_action(set_disk_free_limit, ["50MB"]), + passed. + +disk_monitor(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, disk_monitor1, [Config]). + +disk_monitor1(_Config) -> + %% Issue: rabbitmq-server #91 + %% os module could be mocked using 'unstick', however it may have undesired + %% side effects in following tests. Thus, we mock at rabbit_misc level + ok = meck:new(rabbit_misc, [passthrough]), + ok = meck:expect(rabbit_misc, os_cmd, fun(_) -> "\n" end), + ok = rabbit_sup:stop_child(rabbit_disk_monitor_sup), + ok = rabbit_sup:start_delayed_restartable_child(rabbit_disk_monitor, [1000]), + meck:unload(rabbit_misc), + passed. + +disconnect_detected_during_alarm(Config) -> + A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + + %% Set a low memory high watermark. + rabbit_ct_broker_helpers:rabbitmqctl(Config, A, + ["set_vm_memory_high_watermark", "0.000000001"]), + + %% Open a connection and a channel. 
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, A, tcp_port_amqp), + Heartbeat = 1, + {ok, Conn} = amqp_connection:start( + #amqp_params_network{port = Port, + heartbeat = Heartbeat}), + {ok, Ch} = amqp_connection:open_channel(Conn), + + amqp_connection:register_blocked_handler(Conn, self()), + Publish = #'basic.publish'{routing_key = <<"nowhere-to-go">>}, + amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}), + receive + % Check that connection was indeed blocked + #'connection.blocked'{} -> ok + after + 1000 -> exit(connection_was_not_blocked) + end, + + %% Connection is blocked, now we should forcefully kill it + {'EXIT', _} = (catch amqp_connection:close(Conn, 10)), + + ListConnections = + fun() -> + rpc:call(A, rabbit_networking, connection_info_all, []) + end, + + %% We've already disconnected, but blocked connection still should still linger on. + [SingleConn] = ListConnections(), + blocked = rabbit_misc:pget(state, SingleConn), + + %% It should definitely go away after 2 heartbeat intervals. + timer:sleep(round(2.5 * 1000 * Heartbeat)), + [] = ListConnections(), + + passed. + +%% --------------------------------------------------------------------------- +%% Cluster-dependent tests. +%% --------------------------------------------------------------------------- + +delegates_async(Config) -> + {I, J} = ?config(test_direction, Config), + From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename), + To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename), + rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE), + passed = rabbit_ct_broker_helpers:rpc(Config, + From, ?MODULE, delegates_async1, [Config, To]). + +delegates_async1(_Config, SecondaryNode) -> + Self = self(), + Sender = fun (Pid) -> Pid ! {invoked, Self} end, + + Responder = make_responder(fun ({invoked, Pid}) -> Pid ! 
response end), + + ok = delegate:invoke_no_result(spawn(Responder), Sender), + ok = delegate:invoke_no_result(spawn(SecondaryNode, Responder), Sender), + await_response(2), + + LocalPids = spawn_responders(node(), Responder, 10), + RemotePids = spawn_responders(SecondaryNode, Responder, 10), + ok = delegate:invoke_no_result(LocalPids ++ RemotePids, Sender), + await_response(20), + + passed. + +delegates_sync(Config) -> + {I, J} = ?config(test_direction, Config), + From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename), + To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename), + rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE), + passed = rabbit_ct_broker_helpers:rpc(Config, + From, ?MODULE, delegates_sync1, [Config, To]). + +delegates_sync1(_Config, SecondaryNode) -> + Sender = fun (Pid) -> gen_server:call(Pid, invoked, infinity) end, + BadSender = fun (_Pid) -> exit(exception) end, + + Responder = make_responder(fun ({'$gen_call', From, invoked}) -> + gen_server:reply(From, response) + end), + + BadResponder = make_responder(fun ({'$gen_call', From, invoked}) -> + gen_server:reply(From, response) + end, bad_responder_died), + + response = delegate:invoke(spawn(Responder), Sender), + response = delegate:invoke(spawn(SecondaryNode, Responder), Sender), + + must_exit(fun () -> delegate:invoke(spawn(BadResponder), BadSender) end), + must_exit(fun () -> + delegate:invoke(spawn(SecondaryNode, BadResponder), BadSender) end), + + LocalGoodPids = spawn_responders(node(), Responder, 2), + RemoteGoodPids = spawn_responders(SecondaryNode, Responder, 2), + LocalBadPids = spawn_responders(node(), BadResponder, 2), + RemoteBadPids = spawn_responders(SecondaryNode, BadResponder, 2), + + {GoodRes, []} = delegate:invoke(LocalGoodPids ++ RemoteGoodPids, Sender), + true = lists:all(fun ({_, response}) -> true end, GoodRes), + GoodResPids = [Pid || {Pid, _} <- GoodRes], + + Good = lists:usort(LocalGoodPids ++ RemoteGoodPids), + Good = 
lists:usort(GoodResPids), + + {[], BadRes} = delegate:invoke(LocalBadPids ++ RemoteBadPids, BadSender), + true = lists:all(fun ({_, {exit, exception, _}}) -> true end, BadRes), + BadResPids = [Pid || {Pid, _} <- BadRes], + + Bad = lists:usort(LocalBadPids ++ RemoteBadPids), + Bad = lists:usort(BadResPids), + + MagicalPids = [rabbit_misc:string_to_pid(Str) || + Str <- ["", ""]], + {[], BadNodes} = delegate:invoke(MagicalPids, Sender), + true = lists:all( + fun ({_, {exit, {nodedown, nonode@nohost}, _Stack}}) -> true end, + BadNodes), + BadNodesPids = [Pid || {Pid, _} <- BadNodes], + + Magical = lists:usort(MagicalPids), + Magical = lists:usort(BadNodesPids), + + passed. + +queue_cleanup(Config) -> + {I, J} = ?config(test_direction, Config), + From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename), + To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename), + rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE), + passed = rabbit_ct_broker_helpers:rpc(Config, + From, ?MODULE, queue_cleanup1, [Config, To]). + +queue_cleanup1(_Config, _SecondaryNode) -> + {_Writer, Ch} = test_spawn(), + rabbit_channel:do(Ch, #'queue.declare'{ queue = ?CLEANUP_QUEUE_NAME }), + receive #'queue.declare_ok'{queue = ?CLEANUP_QUEUE_NAME} -> + ok + after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok) + end, + rabbit_channel:shutdown(Ch), + rabbit:stop(), + rabbit:start(), + {_Writer2, Ch2} = test_spawn(), + rabbit_channel:do(Ch2, #'queue.declare'{ passive = true, + queue = ?CLEANUP_QUEUE_NAME }), + receive + #'channel.close'{reply_code = ?NOT_FOUND} -> + ok + after ?TIMEOUT -> throw(failed_to_receive_channel_exit) + end, + rabbit_channel:shutdown(Ch2), + passed. 
+ +declare_on_dead_queue(Config) -> + {I, J} = ?config(test_direction, Config), + From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename), + To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename), + rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE), + passed = rabbit_ct_broker_helpers:rpc(Config, + From, ?MODULE, declare_on_dead_queue1, [Config, To]). + +declare_on_dead_queue1(_Config, SecondaryNode) -> + QueueName = rabbit_misc:r(<<"/">>, queue, ?CLEANUP_QUEUE_NAME), + Self = self(), + Pid = spawn(SecondaryNode, + fun () -> + {new, #amqqueue{name = QueueName, pid = QPid}} = + rabbit_amqqueue:declare(QueueName, false, false, [], + none), + exit(QPid, kill), + Self ! {self(), killed, QPid} + end), + receive + {Pid, killed, OldPid} -> + Q = dead_queue_loop(QueueName, OldPid), + {ok, 0} = rabbit_amqqueue:delete(Q, false, false), + passed + after ?TIMEOUT -> throw(failed_to_create_and_kill_queue) + end. + +refresh_events(Config) -> + {I, J} = ?config(test_direction, Config), + From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename), + To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename), + rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE), + passed = rabbit_ct_broker_helpers:rpc(Config, + From, ?MODULE, refresh_events1, [Config, To]). + +refresh_events1(Config, SecondaryNode) -> + dummy_event_receiver:start(self(), [node(), SecondaryNode], + [channel_created, queue_created]), + + {_Writer, Ch} = test_spawn(), + expect_events(pid, Ch, channel_created), + rabbit_channel:shutdown(Ch), + + {_Writer2, Ch2} = test_spawn(SecondaryNode), + expect_events(pid, Ch2, channel_created), + rabbit_channel:shutdown(Ch2), + + {new, #amqqueue{name = QName} = Q} = + rabbit_amqqueue:declare(queue_name(Config, <<"refresh_events-q">>), + false, false, [], none), + expect_events(name, QName, queue_created), + rabbit_amqqueue:delete(Q, false, false), + + dummy_event_receiver:stop(), + passed. 
+ +make_responder(FMsg) -> make_responder(FMsg, timeout). +make_responder(FMsg, Throw) -> + fun () -> + receive Msg -> FMsg(Msg) + after ?TIMEOUT -> throw(Throw) + end + end. + +spawn_responders(Node, Responder, Count) -> + [spawn(Node, Responder) || _ <- lists:seq(1, Count)]. + +await_response(0) -> + ok; +await_response(Count) -> + receive + response -> ok, + await_response(Count - 1) + after ?TIMEOUT -> throw(timeout) + end. + +must_exit(Fun) -> + try + Fun(), + throw(exit_not_thrown) + catch + exit:_ -> ok + end. + +dead_queue_loop(QueueName, OldPid) -> + {existing, Q} = rabbit_amqqueue:declare(QueueName, false, false, [], none), + case Q#amqqueue.pid of + OldPid -> timer:sleep(25), + dead_queue_loop(QueueName, OldPid); + _ -> true = rabbit_misc:is_process_alive(Q#amqqueue.pid), + Q + end. + +expect_events(Tag, Key, Type) -> + expect_event(Tag, Key, Type), + rabbit:force_event_refresh(make_ref()), + expect_event(Tag, Key, Type). + +expect_event(Tag, Key, Type) -> + receive #event{type = Type, props = Props} -> + case rabbit_misc:pget(Tag, Props) of + Key -> ok; + _ -> expect_event(Tag, Key, Type) + end + after ?TIMEOUT -> throw({failed_to_receive_event, Type}) + end. + +%% --------------------------------------------------------------------------- +%% rabbitmqctl helpers. +%% --------------------------------------------------------------------------- + +control_action(Command, Args) -> + control_action(Command, node(), Args, default_options()). + +control_action(Command, Args, NewOpts) -> + control_action(Command, node(), Args, + expand_options(default_options(), NewOpts)). + +control_action(Command, Node, Args, Opts) -> + case catch rabbit_control_main:action( + Command, Node, Args, Opts, + fun (Format, Args1) -> + io:format(Format ++ " ...~n", Args1) + end) of + ok -> + io:format("done.~n"), + ok; + {ok, Result} -> + rabbit_control_misc:print_cmd_result(Command, Result), + ok; + Other -> + io:format("failed: ~p~n", [Other]), + Other + end. 
+ +control_action_t(Command, Args, Timeout) when is_number(Timeout) -> + control_action_t(Command, node(), Args, default_options(), Timeout). + +control_action_t(Command, Args, NewOpts, Timeout) when is_number(Timeout) -> + control_action_t(Command, node(), Args, + expand_options(default_options(), NewOpts), + Timeout). + +control_action_t(Command, Node, Args, Opts, Timeout) when is_number(Timeout) -> + case catch rabbit_control_main:action( + Command, Node, Args, Opts, + fun (Format, Args1) -> + io:format(Format ++ " ...~n", Args1) + end, Timeout) of + ok -> + io:format("done.~n"), + ok; + {ok, Result} -> + rabbit_control_misc:print_cmd_result(Command, Result), + ok; + Other -> + io:format("failed: ~p~n", [Other]), + Other + end. + +control_action_opts(Raw) -> + NodeStr = atom_to_list(node()), + case rabbit_control_main:parse_arguments(Raw, NodeStr) of + {ok, {Cmd, Opts, Args}} -> + case control_action(Cmd, node(), Args, Opts) of + ok -> ok; + Error -> Error + end; + Error -> + Error + end. + +info_action(Command, Args, CheckVHost) -> + ok = control_action(Command, []), + if CheckVHost -> ok = control_action(Command, [], ["-p", "/"]); + true -> ok + end, + ok = control_action(Command, lists:map(fun atom_to_list/1, Args)), + {bad_argument, dummy} = control_action(Command, ["dummy"]), + ok. + +info_action_t(Command, Args, CheckVHost, Timeout) when is_number(Timeout) -> + if CheckVHost -> ok = control_action_t(Command, [], ["-p", "/"], Timeout); + true -> ok + end, + ok = control_action_t(Command, lists:map(fun atom_to_list/1, Args), Timeout), + ok. + +default_options() -> [{"-p", "/"}, {"-q", "false"}]. + +expand_options(As, Bs) -> + lists:foldl(fun({K, _}=A, R) -> + case proplists:is_defined(K, R) of + true -> R; + false -> [A | R] + end + end, Bs, As). 
From 83643e45cc43a0f852cc60b27ca3e015635854ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Wed, 20 Apr 2016 16:10:51 +0200 Subject: [PATCH 116/174] Switch testsuite to common_test, part #3 The migrated tests are those from `$(WITH_BROKER_TEST_COMMANDS)` and `$(STANDALONE_TEST_COMMANDS)`. References #725. [#116526487] --- test/channel_operation_timeout_SUITE.erl | 196 ++ test/channel_operation_timeout_test_queue.erl | 2443 +++++++++++++++++ test/cluster_rename_SUITE.erl | 304 ++ test/clustering_management_SUITE.erl | 728 +++++ test/crashing_queues_SUITE.erl | 269 ++ test/dynamic_ha_SUITE.erl | 329 +++ test/eager_sync_SUITE.erl | 278 ++ test/inet_proxy_dist.erl | 201 ++ test/inet_tcp_proxy.erl | 134 + test/inet_tcp_proxy_manager.erl | 107 + test/lazy_queue_SUITE.erl | 224 ++ test/many_node_ha_SUITE.erl | 117 + test/partitions_SUITE.erl | 438 +++ test/priority_queue_SUITE.erl | 558 ++++ test/queue_master_location_SUITE.erl | 271 ++ test/rabbit_ha_test_consumer.erl | 114 + test/rabbit_ha_test_producer.erl | 119 + test/simple_ha_SUITE.erl | 216 ++ test/sync_detection_SUITE.erl | 252 ++ 19 files changed, 7298 insertions(+) create mode 100644 test/channel_operation_timeout_SUITE.erl create mode 100644 test/channel_operation_timeout_test_queue.erl create mode 100644 test/cluster_rename_SUITE.erl create mode 100644 test/clustering_management_SUITE.erl create mode 100644 test/crashing_queues_SUITE.erl create mode 100644 test/dynamic_ha_SUITE.erl create mode 100644 test/eager_sync_SUITE.erl create mode 100644 test/inet_proxy_dist.erl create mode 100644 test/inet_tcp_proxy.erl create mode 100644 test/inet_tcp_proxy_manager.erl create mode 100644 test/lazy_queue_SUITE.erl create mode 100644 test/many_node_ha_SUITE.erl create mode 100644 test/partitions_SUITE.erl create mode 100644 test/priority_queue_SUITE.erl create mode 100644 test/queue_master_location_SUITE.erl create mode 100644 test/rabbit_ha_test_consumer.erl create mode 100644 
test/rabbit_ha_test_producer.erl create mode 100644 test/simple_ha_SUITE.erl create mode 100644 test/sync_detection_SUITE.erl diff --git a/test/channel_operation_timeout_SUITE.erl b/test/channel_operation_timeout_SUITE.erl new file mode 100644 index 000000000000..7b41b9c225b1 --- /dev/null +++ b/test/channel_operation_timeout_SUITE.erl @@ -0,0 +1,196 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. +%% + +-module(channel_operation_timeout_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile([export_all]). + +-import(rabbit_misc, [pget/2]). + +-define(CONFIG, [cluster_ab]). +-define(DEFAULT_VHOST, <<"/">>). +-define(QRESOURCE(Q), rabbit_misc:r(?DEFAULT_VHOST, queue, Q)). +-define(TIMEOUT_TEST_MSG, <<"timeout_test_msg!">>). +-define(DELAY, 25). + +all() -> + [ + notify_down_all + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. 
+ +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + ClusterSize = 2, + TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_count, ClusterSize}, + {rmq_nodename_suffix, Testcase}, + {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. +%% ------------------------------------------------------------------- + +notify_down_all(Config) -> + Rabbit = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + RabbitCh = rabbit_ct_client_helpers:open_channel(Config, 0), + HareCh = rabbit_ct_client_helpers:open_channel(Config, 1), + + %% success + set_channel_operation_timeout_config(Config, 1000), + configure_bq(Config), + QCfg0 = qconfig(RabbitCh, <<"q0">>, <<"ex0">>, true, false), + declare(QCfg0), + %% Testing rabbit_amqqueue:notify_down_all via rabbit_channel. 
+    %% Consumer count = 0 after correct channel termination and
+    %% notification of queues via delegate:call/3
+    true = (0 =/= length(get_consumers(Config, Rabbit, ?DEFAULT_VHOST))),
+    rabbit_ct_client_helpers:close_channel(RabbitCh),
+    0 = length(get_consumers(Config, Rabbit, ?DEFAULT_VHOST)),
+    false = is_process_alive(RabbitCh),
+
+    %% fail
+    set_channel_operation_timeout_config(Config, 10),
+    QCfg2 = qconfig(HareCh, <<"q1">>, <<"ex1">>, true, false),
+    declare(QCfg2),
+    publish(QCfg2, ?TIMEOUT_TEST_MSG),
+    timer:sleep(?DELAY),
+    rabbit_ct_client_helpers:close_channel(HareCh),
+    timer:sleep(?DELAY),
+    false = is_process_alive(HareCh),
+
+    pass.
+
+%% -------------------------
+%% Internal helper functions
+%% -------------------------
+
+set_channel_operation_timeout_config(Config, Timeout) ->
+    [ok = Ret
+     || Ret <- rabbit_ct_broker_helpers:rpc_all(Config,
+          application, set_env, [rabbit, channel_operation_timeout, Timeout])],
+    ok.
+
+set_channel_operation_backing_queue(Config) ->
+    [ok = Ret
+     || Ret <- rabbit_ct_broker_helpers:rpc_all(Config,
+          application, set_env,
+          [rabbit, backing_queue_module, channel_operation_timeout_test_queue])],
+    ok.
+
+re_enable_priority_queue(Config) ->
+    [ok = Ret
+     || Ret <- rabbit_ct_broker_helpers:rpc_all(Config,
+          rabbit_priority_queue, enable, [])],
+    ok.
+
+declare(QCfg) ->
+    QDeclare = #'queue.declare'{queue = Q = pget(name, QCfg), durable = true},
+    #'queue.declare_ok'{} = amqp_channel:call(Ch = pget(ch, QCfg), QDeclare),
+
+    ExDeclare = #'exchange.declare'{exchange = Ex = pget(ex, QCfg)},
+    #'exchange.declare_ok'{} = amqp_channel:call(Ch, ExDeclare),
+
+    #'queue.bind_ok'{} =
+        amqp_channel:call(Ch, #'queue.bind'{queue       = Q,
+                                            exchange    = Ex,
+                                            routing_key = Q}),
+    maybe_subscribe(QCfg).
+ +maybe_subscribe(QCfg) -> + case pget(consume, QCfg) of + true -> + Sub = #'basic.consume'{queue = pget(name, QCfg)}, + Ch = pget(ch, QCfg), + Del = pget(deliver, QCfg), + amqp_channel:subscribe(Ch, Sub, + spawn(fun() -> consume(Ch, Del) end)); + _ -> ok + end. + +consume(_Ch, false) -> receive_nothing(); +consume(Ch, Deliver = true) -> + receive + {#'basic.deliver'{}, _Msg} -> + consume(Ch, Deliver) + end. + +publish(QCfg, Msg) -> + Publish = #'basic.publish'{exchange = pget(ex, QCfg), + routing_key = pget(name, QCfg)}, + amqp_channel:call(pget(ch, QCfg), Publish, + #amqp_msg{payload = Msg}). + +get_consumers(Config, Node, VHost) when is_atom(Node), + is_binary(VHost) -> + rabbit_ct_broker_helpers:rpc(Config, Node, + rabbit_amqqueue, consumers_all, [VHost]). + +get_amqqueue(Q, []) -> throw({not_found, Q}); +get_amqqueue(Q, [AMQQ = #amqqueue{name = Q} | _]) -> AMQQ; +get_amqqueue(Q, [_| Rem]) -> get_amqqueue(Q, Rem). + +qconfig(Ch, Name, Ex, Consume, Deliver) -> + [{ch, Ch}, {name, Name}, {ex,Ex}, {consume, Consume}, {deliver, Deliver}]. + +receive_nothing() -> + receive + after infinity -> void + end. + +unhandled_req(Fun) -> + try + Fun() + catch + exit:{{shutdown,{_, ?NOT_FOUND, _}}, _} -> ok; + _:Reason -> {error, Reason} + end. + +configure_bq(Config) -> + ok = set_channel_operation_backing_queue(Config), + ok = re_enable_priority_queue(Config), + ok = rabbit_ct_broker_helpers:add_code_path_to_all_nodes(Config, + ?MODULE). diff --git a/test/channel_operation_timeout_test_queue.erl b/test/channel_operation_timeout_test_queue.erl new file mode 100644 index 000000000000..55cd5f42fa55 --- /dev/null +++ b/test/channel_operation_timeout_test_queue.erl @@ -0,0 +1,2443 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. 
You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(channel_operation_timeout_test_queue). + +-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1, + purge/1, purge_acks/1, + publish/6, publish_delivered/5, + batch_publish/4, batch_publish_delivered/4, + discard/4, drain_confirmed/1, + dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2, + ackfold/4, fold/3, len/1, is_empty/1, depth/1, + set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1, + handle_pre_hibernate/1, resume/1, msg_rates/1, + info/2, invoke/3, is_duplicate/2, set_queue_mode/2, + zip_msgs_and_acks/4, multiple_routing_keys/0]). + +-export([start/1, stop/0]). + +%% exported for testing only +-export([start_msg_store/2, stop_msg_store/0, init/6]). + +%%---------------------------------------------------------------------------- +%% This test backing queue follows the variable queue implementation, with +%% the exception that it will introduce infinite delays on some operations if +%% the test message has been published, and is awaiting acknowledgement in the +%% queue index. Test message is "timeout_test_msg!". +%% +%%---------------------------------------------------------------------------- + +-behaviour(rabbit_backing_queue). 
+ +-record(vqstate, + { q1, + q2, + delta, + q3, + q4, + next_seq_id, + ram_pending_ack, %% msgs using store, still in RAM + disk_pending_ack, %% msgs in store, paged out + qi_pending_ack, %% msgs using qi, *can't* be paged out + index_state, + msg_store_clients, + durable, + transient_threshold, + qi_embed_msgs_below, + + len, %% w/o unacked + bytes, %% w/o unacked + unacked_bytes, + persistent_count, %% w unacked + persistent_bytes, %% w unacked + + target_ram_count, + ram_msg_count, %% w/o unacked + ram_msg_count_prev, + ram_ack_count_prev, + ram_bytes, %% w unacked + out_counter, + in_counter, + rates, + msgs_on_disk, + msg_indices_on_disk, + unconfirmed, + confirmed, + ack_out_counter, + ack_in_counter, + %% Unlike the other counters these two do not feed into + %% #rates{} and get reset + disk_read_count, + disk_write_count, + + io_batch_size, + + %% default queue or lazy queue + mode + }). + +-record(rates, { in, out, ack_in, ack_out, timestamp }). + +-record(msg_status, + { seq_id, + msg_id, + msg, + is_persistent, + is_delivered, + msg_in_store, + index_on_disk, + persist_to, + msg_props + }). + +-record(delta, + { start_seq_id, %% start_seq_id is inclusive + count, + end_seq_id %% end_seq_id is exclusive + }). + +-define(HEADER_GUESS_SIZE, 100). %% see determine_persist_to/2 +-define(PERSISTENT_MSG_STORE, msg_store_persistent). +-define(TRANSIENT_MSG_STORE, msg_store_transient). +-define(QUEUE, lqueue). +-define(TIMEOUT_TEST_MSG, <<"timeout_test_msg!">>). + +-include("rabbit.hrl"). +-include("rabbit_framing.hrl"). + +%%---------------------------------------------------------------------------- + +-rabbit_upgrade({multiple_routing_keys, local, []}). + +-ifdef(use_specs). + +-type(seq_id() :: non_neg_integer()). + +-type(rates() :: #rates { in :: float(), + out :: float(), + ack_in :: float(), + ack_out :: float(), + timestamp :: rabbit_types:timestamp()}). 
+ +-type(delta() :: #delta { start_seq_id :: non_neg_integer(), + count :: non_neg_integer(), + end_seq_id :: non_neg_integer() }). + +%% The compiler (rightfully) complains that ack() and state() are +%% unused. For this reason we duplicate a -spec from +%% rabbit_backing_queue with the only intent being to remove +%% warnings. The problem here is that we can't parameterise the BQ +%% behaviour by these two types as we would like to. We still leave +%% these here for documentation purposes. +-type(ack() :: seq_id()). +-type(state() :: #vqstate { + q1 :: ?QUEUE:?QUEUE(), + q2 :: ?QUEUE:?QUEUE(), + delta :: delta(), + q3 :: ?QUEUE:?QUEUE(), + q4 :: ?QUEUE:?QUEUE(), + next_seq_id :: seq_id(), + ram_pending_ack :: gb_trees:tree(), + disk_pending_ack :: gb_trees:tree(), + qi_pending_ack :: gb_trees:tree(), + index_state :: any(), + msg_store_clients :: 'undefined' | {{any(), binary()}, + {any(), binary()}}, + durable :: boolean(), + transient_threshold :: non_neg_integer(), + qi_embed_msgs_below :: non_neg_integer(), + + len :: non_neg_integer(), + bytes :: non_neg_integer(), + unacked_bytes :: non_neg_integer(), + + persistent_count :: non_neg_integer(), + persistent_bytes :: non_neg_integer(), + + target_ram_count :: non_neg_integer() | 'infinity', + ram_msg_count :: non_neg_integer(), + ram_msg_count_prev :: non_neg_integer(), + ram_ack_count_prev :: non_neg_integer(), + ram_bytes :: non_neg_integer(), + out_counter :: non_neg_integer(), + in_counter :: non_neg_integer(), + rates :: rates(), + msgs_on_disk :: gb_sets:set(), + msg_indices_on_disk :: gb_sets:set(), + unconfirmed :: gb_sets:set(), + confirmed :: gb_sets:set(), + ack_out_counter :: non_neg_integer(), + ack_in_counter :: non_neg_integer(), + disk_read_count :: non_neg_integer(), + disk_write_count :: non_neg_integer(), + + io_batch_size :: pos_integer(), + mode :: 'default' | 'lazy' }). +%% Duplicated from rabbit_backing_queue +-spec(ack/2 :: ([ack()], state()) -> {[rabbit_guid:guid()], state()}). 
+ +-spec(multiple_routing_keys/0 :: () -> 'ok'). + +-endif. + +-define(BLANK_DELTA, #delta { start_seq_id = undefined, + count = 0, + end_seq_id = undefined }). +-define(BLANK_DELTA_PATTERN(Z), #delta { start_seq_id = Z, + count = 0, + end_seq_id = Z }). + +-define(MICROS_PER_SECOND, 1000000.0). + +%% We're sampling every 5s for RAM duration; a half life that is of +%% the same order of magnitude is probably about right. +-define(RATE_AVG_HALF_LIFE, 5.0). + +%% We will recalculate the #rates{} every time we get asked for our +%% RAM duration, or every N messages published, whichever is +%% sooner. We do this since the priority calculations in +%% rabbit_amqqueue_process need fairly fresh rates. +-define(MSGS_PER_RATE_CALC, 100). + +%%---------------------------------------------------------------------------- +%% Public API +%%---------------------------------------------------------------------------- + +start(DurableQueues) -> + {AllTerms, StartFunState} = rabbit_queue_index:start(DurableQueues), + start_msg_store( + [Ref || Terms <- AllTerms, + Terms /= non_clean_shutdown, + begin + Ref = proplists:get_value(persistent_ref, Terms), + Ref =/= undefined + end], + StartFunState), + {ok, AllTerms}. + +stop() -> + ok = stop_msg_store(), + ok = rabbit_queue_index:stop(). + +start_msg_store(Refs, StartFunState) -> + ok = rabbit_sup:start_child(?TRANSIENT_MSG_STORE, rabbit_msg_store, + [?TRANSIENT_MSG_STORE, rabbit_mnesia:dir(), + undefined, {fun (ok) -> finished end, ok}]), + ok = rabbit_sup:start_child(?PERSISTENT_MSG_STORE, rabbit_msg_store, + [?PERSISTENT_MSG_STORE, rabbit_mnesia:dir(), + Refs, StartFunState]). + +stop_msg_store() -> + ok = rabbit_sup:stop_child(?PERSISTENT_MSG_STORE), + ok = rabbit_sup:stop_child(?TRANSIENT_MSG_STORE). 
+ +init(Queue, Recover, Callback) -> + init( + Queue, Recover, Callback, + fun (MsgIds, ActionTaken) -> + msgs_written_to_disk(Callback, MsgIds, ActionTaken) + end, + fun (MsgIds) -> msg_indices_written_to_disk(Callback, MsgIds) end, + fun (MsgIds) -> msgs_and_indices_written_to_disk(Callback, MsgIds) end). + +init(#amqqueue { name = QueueName, durable = IsDurable }, new, + AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) -> + IndexState = rabbit_queue_index:init(QueueName, + MsgIdxOnDiskFun, MsgAndIdxOnDiskFun), + init(IsDurable, IndexState, 0, 0, [], + case IsDurable of + true -> msg_store_client_init(?PERSISTENT_MSG_STORE, + MsgOnDiskFun, AsyncCallback); + false -> undefined + end, + msg_store_client_init(?TRANSIENT_MSG_STORE, undefined, AsyncCallback)); + +%% We can be recovering a transient queue if it crashed +init(#amqqueue { name = QueueName, durable = IsDurable }, Terms, + AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) -> + {PRef, RecoveryTerms} = process_recovery_terms(Terms), + {PersistentClient, ContainsCheckFun} = + case IsDurable of + true -> C = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef, + MsgOnDiskFun, AsyncCallback), + {C, fun (MsgId) when is_binary(MsgId) -> + rabbit_msg_store:contains(MsgId, C); + (#basic_message{is_persistent = Persistent}) -> + Persistent + end}; + false -> {undefined, fun(_MsgId) -> false end} + end, + TransientClient = msg_store_client_init(?TRANSIENT_MSG_STORE, + undefined, AsyncCallback), + {DeltaCount, DeltaBytes, IndexState} = + rabbit_queue_index:recover( + QueueName, RecoveryTerms, + rabbit_msg_store:successfully_recovered_state(?PERSISTENT_MSG_STORE), + ContainsCheckFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun), + init(IsDurable, IndexState, DeltaCount, DeltaBytes, RecoveryTerms, + PersistentClient, TransientClient). 
+ +process_recovery_terms(Terms=non_clean_shutdown) -> + {rabbit_guid:gen(), Terms}; +process_recovery_terms(Terms) -> + case proplists:get_value(persistent_ref, Terms) of + undefined -> {rabbit_guid:gen(), []}; + PRef -> {PRef, Terms} + end. + +terminate(_Reason, State) -> + State1 = #vqstate { persistent_count = PCount, + persistent_bytes = PBytes, + index_state = IndexState, + msg_store_clients = {MSCStateP, MSCStateT} } = + purge_pending_ack(true, State), + PRef = case MSCStateP of + undefined -> undefined; + _ -> ok = rabbit_msg_store:client_terminate(MSCStateP), + rabbit_msg_store:client_ref(MSCStateP) + end, + ok = rabbit_msg_store:client_delete_and_terminate(MSCStateT), + Terms = [{persistent_ref, PRef}, + {persistent_count, PCount}, + {persistent_bytes, PBytes}], + a(State1 #vqstate { index_state = rabbit_queue_index:terminate( + Terms, IndexState), + msg_store_clients = undefined }). + +%% the only difference between purge and delete is that delete also +%% needs to delete everything that's been delivered and not ack'd. +delete_and_terminate(_Reason, State) -> + %% Normally when we purge messages we interact with the qi by + %% issues delivers and acks for every purged message. In this case + %% we don't need to do that, so we just delete the qi. + State1 = purge_and_index_reset(State), + State2 = #vqstate { msg_store_clients = {MSCStateP, MSCStateT} } = + purge_pending_ack_delete_and_terminate(State1), + case MSCStateP of + undefined -> ok; + _ -> rabbit_msg_store:client_delete_and_terminate(MSCStateP) + end, + rabbit_msg_store:client_delete_and_terminate(MSCStateT), + a(State2 #vqstate { msg_store_clients = undefined }). + +delete_crashed(#amqqueue{name = QName}) -> + ok = rabbit_queue_index:erase(QName). + +purge(State = #vqstate { len = Len, qi_pending_ack= QPA }) -> + maybe_delay(QPA), + case is_pending_ack_empty(State) of + true -> + {Len, purge_and_index_reset(State)}; + false -> + {Len, purge_when_pending_acks(State)} + end. 
+ +purge_acks(State) -> a(purge_pending_ack(false, State)). + +publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State) -> + State1 = + publish1(Msg, MsgProps, IsDelivered, ChPid, Flow, + fun maybe_write_to_disk/4, + State), + a(reduce_memory_use(maybe_update_rates(State1))). + +batch_publish(Publishes, ChPid, Flow, State) -> + {ChPid, Flow, State1} = + lists:foldl(fun batch_publish1/2, {ChPid, Flow, State}, Publishes), + State2 = ui(State1), + a(reduce_memory_use(maybe_update_rates(State2))). + +publish_delivered(Msg, MsgProps, ChPid, Flow, State) -> + {SeqId, State1} = + publish_delivered1(Msg, MsgProps, ChPid, Flow, + fun maybe_write_to_disk/4, + State), + {SeqId, a(reduce_memory_use(maybe_update_rates(State1)))}. + +batch_publish_delivered(Publishes, ChPid, Flow, State) -> + {ChPid, Flow, SeqIds, State1} = + lists:foldl(fun batch_publish_delivered1/2, + {ChPid, Flow, [], State}, Publishes), + State2 = ui(State1), + {lists:reverse(SeqIds), a(reduce_memory_use(maybe_update_rates(State2)))}. + +discard(_MsgId, _ChPid, _Flow, State) -> State. + +drain_confirmed(State = #vqstate { confirmed = C }) -> + case gb_sets:is_empty(C) of + true -> {[], State}; %% common case + false -> {gb_sets:to_list(C), State #vqstate { + confirmed = gb_sets:new() }} + end. + +dropwhile(Pred, State) -> + {MsgProps, State1} = + remove_by_predicate(Pred, State), + {MsgProps, a(State1)}. + +fetchwhile(Pred, Fun, Acc, State) -> + {MsgProps, Acc1, State1} = + fetch_by_predicate(Pred, Fun, Acc, State), + {MsgProps, Acc1, a(State1)}. + +fetch(AckRequired, State) -> + case queue_out(State) of + {empty, State1} -> + {empty, a(State1)}; + {{value, MsgStatus}, State1} -> + %% it is possible that the message wasn't read from disk + %% at this point, so read it in. + {Msg, State2} = read_msg(MsgStatus, State1), + {AckTag, State3} = remove(AckRequired, MsgStatus, State2), + {{Msg, MsgStatus#msg_status.is_delivered, AckTag}, a(State3)} + end. 
+ +drop(AckRequired, State) -> + case queue_out(State) of + {empty, State1} -> + {empty, a(State1)}; + {{value, MsgStatus}, State1} -> + {AckTag, State2} = remove(AckRequired, MsgStatus, State1), + {{MsgStatus#msg_status.msg_id, AckTag}, a(State2)} + end. + +ack([], State) -> + {[], State}; +%% optimisation: this head is essentially a partial evaluation of the +%% general case below, for the single-ack case. +ack([SeqId], State) -> + {#msg_status { msg_id = MsgId, + is_persistent = IsPersistent, + msg_in_store = MsgInStore, + index_on_disk = IndexOnDisk }, + State1 = #vqstate { index_state = IndexState, + msg_store_clients = MSCState, + ack_out_counter = AckOutCount }} = + remove_pending_ack(true, SeqId, State), + IndexState1 = case IndexOnDisk of + true -> rabbit_queue_index:ack([SeqId], IndexState); + false -> IndexState + end, + case MsgInStore of + true -> ok = msg_store_remove(MSCState, IsPersistent, [MsgId]); + false -> ok + end, + {[MsgId], + a(State1 #vqstate { index_state = IndexState1, + ack_out_counter = AckOutCount + 1 })}; +ack(AckTags, State) -> + {{IndexOnDiskSeqIds, MsgIdsByStore, AllMsgIds}, + State1 = #vqstate { index_state = IndexState, + msg_store_clients = MSCState, + ack_out_counter = AckOutCount }} = + lists:foldl( + fun (SeqId, {Acc, State2}) -> + {MsgStatus, State3} = remove_pending_ack(true, SeqId, State2), + {accumulate_ack(MsgStatus, Acc), State3} + end, {accumulate_ack_init(), State}, AckTags), + IndexState1 = rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState), + remove_msgs_by_id(MsgIdsByStore, MSCState), + {lists:reverse(AllMsgIds), + a(State1 #vqstate { index_state = IndexState1, + ack_out_counter = AckOutCount + length(AckTags) })}. 
+ +requeue(AckTags, #vqstate { mode = default, + delta = Delta, + q3 = Q3, + q4 = Q4, + in_counter = InCounter, + len = Len, + qi_pending_ack = QPA } = State) -> + maybe_delay(QPA), + {SeqIds, Q4a, MsgIds, State1} = queue_merge(lists:sort(AckTags), Q4, [], + beta_limit(Q3), + fun publish_alpha/2, State), + {SeqIds1, Q3a, MsgIds1, State2} = queue_merge(SeqIds, Q3, MsgIds, + delta_limit(Delta), + fun publish_beta/2, State1), + {Delta1, MsgIds2, State3} = delta_merge(SeqIds1, Delta, MsgIds1, + State2), + MsgCount = length(MsgIds2), + {MsgIds2, a(reduce_memory_use( + maybe_update_rates( + State3 #vqstate { delta = Delta1, + q3 = Q3a, + q4 = Q4a, + in_counter = InCounter + MsgCount, + len = Len + MsgCount })))}; +requeue(AckTags, #vqstate { mode = lazy, + delta = Delta, + q3 = Q3, + in_counter = InCounter, + len = Len, + qi_pending_ack = QPA } = State) -> + maybe_delay(QPA), + {SeqIds, Q3a, MsgIds, State1} = queue_merge(lists:sort(AckTags), Q3, [], + delta_limit(Delta), + fun publish_beta/2, State), + {Delta1, MsgIds1, State2} = delta_merge(SeqIds, Delta, MsgIds, + State1), + MsgCount = length(MsgIds1), + {MsgIds1, a(reduce_memory_use( + maybe_update_rates( + State2 #vqstate { delta = Delta1, + q3 = Q3a, + in_counter = InCounter + MsgCount, + len = Len + MsgCount })))}. + +ackfold(MsgFun, Acc, State, AckTags) -> + {AccN, StateN} = + lists:foldl(fun(SeqId, {Acc0, State0}) -> + MsgStatus = lookup_pending_ack(SeqId, State0), + {Msg, State1} = read_msg(MsgStatus, State0), + {MsgFun(Msg, SeqId, Acc0), State1} + end, {Acc, State}, AckTags), + {AccN, a(StateN)}. + +fold(Fun, Acc, State = #vqstate{index_state = IndexState}) -> + {Its, IndexState1} = lists:foldl(fun inext/2, {[], IndexState}, + [msg_iterator(State), + disk_ack_iterator(State), + ram_ack_iterator(State), + qi_ack_iterator(State)]), + ifold(Fun, Acc, Its, State#vqstate{index_state = IndexState1}). + +len(#vqstate { len = Len, qi_pending_ack = QPA }) -> + maybe_delay(QPA), + Len. 
+ +is_empty(State) -> 0 == len(State). + +depth(State) -> + len(State) + count_pending_acks(State). + +set_ram_duration_target( + DurationTarget, State = #vqstate { + rates = #rates { in = AvgIngressRate, + out = AvgEgressRate, + ack_in = AvgAckIngressRate, + ack_out = AvgAckEgressRate }, + target_ram_count = TargetRamCount }) -> + Rate = + AvgEgressRate + AvgIngressRate + AvgAckEgressRate + AvgAckIngressRate, + TargetRamCount1 = + case DurationTarget of + infinity -> infinity; + _ -> trunc(DurationTarget * Rate) %% msgs = sec * msgs/sec + end, + State1 = State #vqstate { target_ram_count = TargetRamCount1 }, + a(case TargetRamCount1 == infinity orelse + (TargetRamCount =/= infinity andalso + TargetRamCount1 >= TargetRamCount) of + true -> State1; + false -> reduce_memory_use(State1) + end). + +maybe_update_rates(State = #vqstate{ in_counter = InCount, + out_counter = OutCount }) + when InCount + OutCount > ?MSGS_PER_RATE_CALC -> + update_rates(State); +maybe_update_rates(State) -> + State. + +update_rates(State = #vqstate{ in_counter = InCount, + out_counter = OutCount, + ack_in_counter = AckInCount, + ack_out_counter = AckOutCount, + rates = #rates{ in = InRate, + out = OutRate, + ack_in = AckInRate, + ack_out = AckOutRate, + timestamp = TS }}) -> + Now = time_compat:monotonic_time(), + + Rates = #rates { in = update_rate(Now, TS, InCount, InRate), + out = update_rate(Now, TS, OutCount, OutRate), + ack_in = update_rate(Now, TS, AckInCount, AckInRate), + ack_out = update_rate(Now, TS, AckOutCount, AckOutRate), + timestamp = Now }, + + State#vqstate{ in_counter = 0, + out_counter = 0, + ack_in_counter = 0, + ack_out_counter = 0, + rates = Rates }. + +update_rate(Now, TS, Count, Rate) -> + Time = time_compat:convert_time_unit(Now - TS, native, micro_seconds) / + ?MICROS_PER_SECOND, + if + Time == 0 -> Rate; + true -> rabbit_misc:moving_average(Time, ?RATE_AVG_HALF_LIFE, + Count / Time, Rate) + end. 
+ +ram_duration(State) -> + State1 = #vqstate { rates = #rates { in = AvgIngressRate, + out = AvgEgressRate, + ack_in = AvgAckIngressRate, + ack_out = AvgAckEgressRate }, + ram_msg_count = RamMsgCount, + ram_msg_count_prev = RamMsgCountPrev, + ram_pending_ack = RPA, + qi_pending_ack = QPA, + ram_ack_count_prev = RamAckCountPrev } = + update_rates(State), + + RamAckCount = gb_trees:size(RPA) + gb_trees:size(QPA), + + Duration = %% msgs+acks / (msgs+acks/sec) == sec + case lists:all(fun (X) -> X < 0.01 end, + [AvgEgressRate, AvgIngressRate, + AvgAckEgressRate, AvgAckIngressRate]) of + true -> infinity; + false -> (RamMsgCountPrev + RamMsgCount + + RamAckCount + RamAckCountPrev) / + (4 * (AvgEgressRate + AvgIngressRate + + AvgAckEgressRate + AvgAckIngressRate)) + end, + + {Duration, State1}. + +needs_timeout(#vqstate { index_state = IndexState }) -> + case rabbit_queue_index:needs_sync(IndexState) of + confirms -> timed; + other -> idle; + false -> false + end. + +timeout(State = #vqstate { index_state = IndexState }) -> + State #vqstate { index_state = rabbit_queue_index:sync(IndexState) }. + +handle_pre_hibernate(State = #vqstate { index_state = IndexState }) -> + State #vqstate { index_state = rabbit_queue_index:flush(IndexState) }. + +resume(State) -> a(reduce_memory_use(State)). + +msg_rates(#vqstate { rates = #rates { in = AvgIngressRate, + out = AvgEgressRate } }) -> + {AvgIngressRate, AvgEgressRate}. 
+ +info(messages_ready_ram, #vqstate{ram_msg_count = RamMsgCount}) -> + RamMsgCount; +info(messages_unacknowledged_ram, #vqstate{ram_pending_ack = RPA, + qi_pending_ack = QPA}) -> + gb_trees:size(RPA) + gb_trees:size(QPA); +info(messages_ram, State) -> + info(messages_ready_ram, State) + info(messages_unacknowledged_ram, State); +info(messages_persistent, #vqstate{persistent_count = PersistentCount}) -> + PersistentCount; +info(message_bytes, #vqstate{bytes = Bytes, + unacked_bytes = UBytes}) -> + Bytes + UBytes; +info(message_bytes_ready, #vqstate{bytes = Bytes}) -> + Bytes; +info(message_bytes_unacknowledged, #vqstate{unacked_bytes = UBytes}) -> + UBytes; +info(message_bytes_ram, #vqstate{ram_bytes = RamBytes}) -> + RamBytes; +info(message_bytes_persistent, #vqstate{persistent_bytes = PersistentBytes}) -> + PersistentBytes; +info(head_message_timestamp, #vqstate{ + q3 = Q3, + q4 = Q4, + ram_pending_ack = RPA, + qi_pending_ack = QPA}) -> + head_message_timestamp(Q3, Q4, RPA, QPA); +info(disk_reads, #vqstate{disk_read_count = Count}) -> + Count; +info(disk_writes, #vqstate{disk_write_count = Count}) -> + Count; +info(backing_queue_status, #vqstate { + q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4, + mode = Mode, + len = Len, + target_ram_count = TargetRamCount, + next_seq_id = NextSeqId, + rates = #rates { in = AvgIngressRate, + out = AvgEgressRate, + ack_in = AvgAckIngressRate, + ack_out = AvgAckEgressRate }}) -> + + [ {mode , Mode}, + {q1 , ?QUEUE:len(Q1)}, + {q2 , ?QUEUE:len(Q2)}, + {delta , Delta}, + {q3 , ?QUEUE:len(Q3)}, + {q4 , ?QUEUE:len(Q4)}, + {len , Len}, + {target_ram_count , TargetRamCount}, + {next_seq_id , NextSeqId}, + {avg_ingress_rate , AvgIngressRate}, + {avg_egress_rate , AvgEgressRate}, + {avg_ack_ingress_rate, AvgAckIngressRate}, + {avg_ack_egress_rate , AvgAckEgressRate} ]; +info(Item, _) -> + throw({bad_argument, Item}). + +invoke(?MODULE, Fun, State) -> Fun(?MODULE, State); +invoke( _, _, State) -> State. 
+ +is_duplicate(_Msg, State) -> {false, State}. + +set_queue_mode(Mode, State = #vqstate { mode = Mode }) -> + State; +set_queue_mode(lazy, State = #vqstate { + target_ram_count = TargetRamCount }) -> + %% To become a lazy queue we need to page everything to disk first. + State1 = convert_to_lazy(State), + %% restore the original target_ram_count + a(State1 #vqstate { mode = lazy, target_ram_count = TargetRamCount }); +set_queue_mode(default, State) -> + %% becoming a default queue means loading messages from disk like + %% when a queue is recovered. + a(maybe_deltas_to_betas(State #vqstate { mode = default })); +set_queue_mode(_, State) -> + State. + +zip_msgs_and_acks(Msgs, AckTags, Accumulator, _State) -> + lists:foldl(fun ({{#basic_message{ id = Id }, _Props}, AckTag}, Acc) -> + [{Id, AckTag} | Acc] + end, Accumulator, lists:zip(Msgs, AckTags)). + +convert_to_lazy(State) -> + State1 = #vqstate { delta = Delta, q3 = Q3, len = Len } = + set_ram_duration_target(0, State), + case Delta#delta.count + ?QUEUE:len(Q3) == Len of + true -> + State1; + false -> + %% When pushing messages to disk, we might have been + %% blocked by the msg_store, so we need to see if we have + %% to wait for more credit, and then keep paging messages. + %% + %% The amqqueue_process could have taken care of this, but + %% between the time it receives the bump_credit msg and + %% calls BQ:resume to keep paging messages to disk, some + %% other request may arrive to the BQ which at this moment + %% is not in a proper state for a lazy BQ (unless all + %% messages have been paged to disk already). + wait_for_msg_store_credit(), + convert_to_lazy(State1) + end. + +wait_for_msg_store_credit() -> + case credit_flow:blocked() of + true -> receive + {bump_credit, Msg} -> + credit_flow:handle_bump_msg(Msg) + end; + false -> ok + end. + +%% Get the Timestamp property of the first msg, if present. This is +%% the one with the oldest timestamp among the heads of the pending +%% acks and unread queues. 
We can't check disk_pending_acks as these +%% are paged out - we assume some will soon be paged in rather than +%% forcing it to happen. Pending ack msgs are included as they are +%% regarded as unprocessed until acked, this also prevents the result +%% apparently oscillating during repeated rejects. Q3 is only checked +%% when Q4 is empty as any Q4 msg will be earlier. +head_message_timestamp(Q3, Q4, RPA, QPA) -> + HeadMsgs = [ HeadMsgStatus#msg_status.msg || + HeadMsgStatus <- + [ get_qs_head([Q4, Q3]), + get_pa_head(RPA), + get_pa_head(QPA) ], + HeadMsgStatus /= undefined, + HeadMsgStatus#msg_status.msg /= undefined ], + + Timestamps = + [Timestamp || HeadMsg <- HeadMsgs, + Timestamp <- [rabbit_basic:extract_timestamp( + HeadMsg#basic_message.content)], + Timestamp /= undefined + ], + + case Timestamps == [] of + true -> ''; + false -> lists:min(Timestamps) + end. + +get_qs_head(Qs) -> + catch lists:foldl( + fun (Q, Acc) -> + case get_q_head(Q) of + undefined -> Acc; + Val -> throw(Val) + end + end, undefined, Qs). + +get_q_head(Q) -> + get_collection_head(Q, fun ?QUEUE:is_empty/1, fun ?QUEUE:peek/1). + +get_pa_head(PA) -> + get_collection_head(PA, fun gb_trees:is_empty/1, fun gb_trees:smallest/1). + +get_collection_head(Col, IsEmpty, GetVal) -> + case IsEmpty(Col) of + false -> + {_, MsgStatus} = GetVal(Col), + MsgStatus; + true -> undefined + end. 
+ +%%---------------------------------------------------------------------------- +%% Minor helpers +%%---------------------------------------------------------------------------- +a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4, + mode = default, + len = Len, + bytes = Bytes, + unacked_bytes = UnackedBytes, + persistent_count = PersistentCount, + persistent_bytes = PersistentBytes, + ram_msg_count = RamMsgCount, + ram_bytes = RamBytes}) -> + E1 = ?QUEUE:is_empty(Q1), + E2 = ?QUEUE:is_empty(Q2), + ED = Delta#delta.count == 0, + E3 = ?QUEUE:is_empty(Q3), + E4 = ?QUEUE:is_empty(Q4), + LZ = Len == 0, + + %% if q1 has messages then q3 cannot be empty. See publish/6. + true = E1 or not E3, + %% if q2 has messages then we have messages in delta (paged to + %% disk). See push_alphas_to_betas/2. + true = E2 or not ED, + %% if delta has messages then q3 cannot be empty. This is enforced + %% by paging, where min([?SEGMENT_ENTRY_COUNT, len(q3)]) messages + %% are always kept on RAM. + true = ED or not E3, + %% if the queue length is 0, then q3 and q4 must be empty. + true = LZ == (E3 and E4), + + true = Len >= 0, + true = Bytes >= 0, + true = UnackedBytes >= 0, + true = PersistentCount >= 0, + true = PersistentBytes >= 0, + true = RamMsgCount >= 0, + true = RamMsgCount =< Len, + true = RamBytes >= 0, + true = RamBytes =< Bytes + UnackedBytes, + + State; +a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4, + mode = lazy, + len = Len, + bytes = Bytes, + unacked_bytes = UnackedBytes, + persistent_count = PersistentCount, + persistent_bytes = PersistentBytes, + ram_msg_count = RamMsgCount, + ram_bytes = RamBytes}) -> + E1 = ?QUEUE:is_empty(Q1), + E2 = ?QUEUE:is_empty(Q2), + ED = Delta#delta.count == 0, + E3 = ?QUEUE:is_empty(Q3), + E4 = ?QUEUE:is_empty(Q4), + LZ = Len == 0, + L3 = ?QUEUE:len(Q3), + + %% q1 must always be empty, since q1 only gets messages during + %% publish, but for lazy queues messages go straight to delta. 
+ true = E1, + + %% q2 only gets messages from q1 when push_alphas_to_betas is + %% called for a non empty delta, which won't be the case for a + %% lazy queue. This means q2 must always be empty. + true = E2, + + %% q4 must always be empty, since q1 only gets messages during + %% publish, but for lazy queues messages go straight to delta. + true = E4, + + %% if the queue is empty, then delta is empty and q3 is empty. + true = LZ == (ED and E3), + + %% There should be no messages in q1, q2, and q4 + true = Delta#delta.count + L3 == Len, + + true = Len >= 0, + true = Bytes >= 0, + true = UnackedBytes >= 0, + true = PersistentCount >= 0, + true = PersistentBytes >= 0, + true = RamMsgCount >= 0, + true = RamMsgCount =< Len, + true = RamBytes >= 0, + true = RamBytes =< Bytes + UnackedBytes, + + State. + +d(Delta = #delta { start_seq_id = Start, count = Count, end_seq_id = End }) + when Start + Count =< End -> + Delta. + +m(MsgStatus = #msg_status { is_persistent = IsPersistent, + msg_in_store = MsgInStore, + index_on_disk = IndexOnDisk }) -> + true = (not IsPersistent) or IndexOnDisk, + true = msg_in_ram(MsgStatus) or MsgInStore, + MsgStatus. + +one_if(true ) -> 1; +one_if(false) -> 0. + +cons_if(true, E, L) -> [E | L]; +cons_if(false, _E, L) -> L. + +gb_sets_maybe_insert(false, _Val, Set) -> Set; +gb_sets_maybe_insert(true, Val, Set) -> gb_sets:add(Val, Set). + +msg_status(IsPersistent, IsDelivered, SeqId, + Msg = #basic_message {id = MsgId}, MsgProps, IndexMaxSize) -> + #msg_status{seq_id = SeqId, + msg_id = MsgId, + msg = Msg, + is_persistent = IsPersistent, + is_delivered = IsDelivered, + msg_in_store = false, + index_on_disk = false, + persist_to = determine_persist_to(Msg, MsgProps, IndexMaxSize), + msg_props = MsgProps}. 
+ +beta_msg_status({Msg = #basic_message{id = MsgId}, + SeqId, MsgProps, IsPersistent, IsDelivered}) -> + MS0 = beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered), + MS0#msg_status{msg_id = MsgId, + msg = Msg, + persist_to = queue_index, + msg_in_store = false}; + +beta_msg_status({MsgId, SeqId, MsgProps, IsPersistent, IsDelivered}) -> + MS0 = beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered), + MS0#msg_status{msg_id = MsgId, + msg = undefined, + persist_to = msg_store, + msg_in_store = true}. + +beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered) -> + #msg_status{seq_id = SeqId, + msg = undefined, + is_persistent = IsPersistent, + is_delivered = IsDelivered, + index_on_disk = true, + msg_props = MsgProps}. + +trim_msg_status(MsgStatus) -> + case persist_to(MsgStatus) of + msg_store -> MsgStatus#msg_status{msg = undefined}; + queue_index -> MsgStatus + end. + +with_msg_store_state({MSCStateP, MSCStateT}, true, Fun) -> + {Result, MSCStateP1} = Fun(MSCStateP), + {Result, {MSCStateP1, MSCStateT}}; +with_msg_store_state({MSCStateP, MSCStateT}, false, Fun) -> + {Result, MSCStateT1} = Fun(MSCStateT), + {Result, {MSCStateP, MSCStateT1}}. + +with_immutable_msg_store_state(MSCState, IsPersistent, Fun) -> + {Res, MSCState} = with_msg_store_state(MSCState, IsPersistent, + fun (MSCState1) -> + {Fun(MSCState1), MSCState1} + end), + Res. + +msg_store_client_init(MsgStore, MsgOnDiskFun, Callback) -> + msg_store_client_init(MsgStore, rabbit_guid:gen(), MsgOnDiskFun, + Callback). + +msg_store_client_init(MsgStore, Ref, MsgOnDiskFun, Callback) -> + CloseFDsFun = msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE), + rabbit_msg_store:client_init(MsgStore, Ref, MsgOnDiskFun, + fun () -> Callback(?MODULE, CloseFDsFun) end). + +msg_store_write(MSCState, IsPersistent, MsgId, Msg) -> + with_immutable_msg_store_state( + MSCState, IsPersistent, + fun (MSCState1) -> + rabbit_msg_store:write_flow(MsgId, Msg, MSCState1) + end). 
+ +msg_store_read(MSCState, IsPersistent, MsgId) -> + with_msg_store_state( + MSCState, IsPersistent, + fun (MSCState1) -> + rabbit_msg_store:read(MsgId, MSCState1) + end). + +msg_store_remove(MSCState, IsPersistent, MsgIds) -> + with_immutable_msg_store_state( + MSCState, IsPersistent, + fun (MCSState1) -> + rabbit_msg_store:remove(MsgIds, MCSState1) + end). + +msg_store_close_fds(MSCState, IsPersistent) -> + with_msg_store_state( + MSCState, IsPersistent, + fun (MSCState1) -> rabbit_msg_store:close_all_indicated(MSCState1) end). + +msg_store_close_fds_fun(IsPersistent) -> + fun (?MODULE, State = #vqstate { msg_store_clients = MSCState }) -> + {ok, MSCState1} = msg_store_close_fds(MSCState, IsPersistent), + State #vqstate { msg_store_clients = MSCState1 } + end. + +maybe_write_delivered(false, _SeqId, IndexState) -> + IndexState; +maybe_write_delivered(true, SeqId, IndexState) -> + rabbit_queue_index:deliver([SeqId], IndexState). + +betas_from_index_entries(List, TransientThreshold, DelsAndAcksFun, State) -> + {Filtered, Delivers, Acks, RamReadyCount, RamBytes} = + lists:foldr( + fun ({_MsgOrId, SeqId, _MsgProps, IsPersistent, IsDelivered} = M, + {Filtered1, Delivers1, Acks1, RRC, RB} = Acc) -> + case SeqId < TransientThreshold andalso not IsPersistent of + true -> {Filtered1, + cons_if(not IsDelivered, SeqId, Delivers1), + [SeqId | Acks1], RRC, RB}; + false -> MsgStatus = m(beta_msg_status(M)), + HaveMsg = msg_in_ram(MsgStatus), + Size = msg_size(MsgStatus), + case is_msg_in_pending_acks(SeqId, State) of + false -> {?QUEUE:in_r(MsgStatus, Filtered1), + Delivers1, Acks1, + RRC + one_if(HaveMsg), + RB + one_if(HaveMsg) * Size}; + true -> Acc %% [0] + end + end + end, {?QUEUE:new(), [], [], 0, 0}, List), + {Filtered, RamReadyCount, RamBytes, DelsAndAcksFun(Delivers, Acks, State)}. 
+%% [0] We don't increase RamBytes here, even though it pertains to +%% unacked messages too, since if HaveMsg then the message must have +%% been stored in the QI, thus the message must have been in +%% qi_pending_ack, thus it must already have been in RAM. + +is_msg_in_pending_acks(SeqId, #vqstate { ram_pending_ack = RPA, + disk_pending_ack = DPA, + qi_pending_ack = QPA }) -> + (gb_trees:is_defined(SeqId, RPA) orelse + gb_trees:is_defined(SeqId, DPA) orelse + gb_trees:is_defined(SeqId, QPA)). + +expand_delta(SeqId, ?BLANK_DELTA_PATTERN(X)) -> + d(#delta { start_seq_id = SeqId, count = 1, end_seq_id = SeqId + 1 }); +expand_delta(SeqId, #delta { start_seq_id = StartSeqId, + count = Count } = Delta) + when SeqId < StartSeqId -> + d(Delta #delta { start_seq_id = SeqId, count = Count + 1 }); +expand_delta(SeqId, #delta { count = Count, + end_seq_id = EndSeqId } = Delta) + when SeqId >= EndSeqId -> + d(Delta #delta { count = Count + 1, end_seq_id = SeqId + 1 }); +expand_delta(_SeqId, #delta { count = Count } = Delta) -> + d(Delta #delta { count = Count + 1 }). 
+ +%%---------------------------------------------------------------------------- +%% Internal major helpers for Public API +%%---------------------------------------------------------------------------- + +init(IsDurable, IndexState, DeltaCount, DeltaBytes, Terms, + PersistentClient, TransientClient) -> + {LowSeqId, NextSeqId, IndexState1} = rabbit_queue_index:bounds(IndexState), + + {DeltaCount1, DeltaBytes1} = + case Terms of + non_clean_shutdown -> {DeltaCount, DeltaBytes}; + _ -> {proplists:get_value(persistent_count, + Terms, DeltaCount), + proplists:get_value(persistent_bytes, + Terms, DeltaBytes)} + end, + Delta = case DeltaCount1 == 0 andalso DeltaCount /= undefined of + true -> ?BLANK_DELTA; + false -> d(#delta { start_seq_id = LowSeqId, + count = DeltaCount1, + end_seq_id = NextSeqId }) + end, + Now = time_compat:monotonic_time(), + IoBatchSize = rabbit_misc:get_env(rabbit, msg_store_io_batch_size, + ?IO_BATCH_SIZE), + + {ok, IndexMaxSize} = application:get_env( + rabbit, queue_index_embed_msgs_below), + State = #vqstate { + q1 = ?QUEUE:new(), + q2 = ?QUEUE:new(), + delta = Delta, + q3 = ?QUEUE:new(), + q4 = ?QUEUE:new(), + next_seq_id = NextSeqId, + ram_pending_ack = gb_trees:empty(), + disk_pending_ack = gb_trees:empty(), + qi_pending_ack = gb_trees:empty(), + index_state = IndexState1, + msg_store_clients = {PersistentClient, TransientClient}, + durable = IsDurable, + transient_threshold = NextSeqId, + qi_embed_msgs_below = IndexMaxSize, + + len = DeltaCount1, + persistent_count = DeltaCount1, + bytes = DeltaBytes1, + persistent_bytes = DeltaBytes1, + + target_ram_count = infinity, + ram_msg_count = 0, + ram_msg_count_prev = 0, + ram_ack_count_prev = 0, + ram_bytes = 0, + unacked_bytes = 0, + out_counter = 0, + in_counter = 0, + rates = blank_rates(Now), + msgs_on_disk = gb_sets:new(), + msg_indices_on_disk = gb_sets:new(), + unconfirmed = gb_sets:new(), + confirmed = gb_sets:new(), + ack_out_counter = 0, + ack_in_counter = 0, + disk_read_count = 0, + 
disk_write_count = 0, + + io_batch_size = IoBatchSize, + + mode = default }, + a(maybe_deltas_to_betas(State)). + +blank_rates(Now) -> + #rates { in = 0.0, + out = 0.0, + ack_in = 0.0, + ack_out = 0.0, + timestamp = Now}. + +in_r(MsgStatus = #msg_status { msg = undefined }, + State = #vqstate { mode = default, q3 = Q3, q4 = Q4 }) -> + case ?QUEUE:is_empty(Q4) of + true -> State #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3) }; + false -> {Msg, State1 = #vqstate { q4 = Q4a }} = + read_msg(MsgStatus, State), + MsgStatus1 = MsgStatus#msg_status{msg = Msg}, + stats(ready0, {MsgStatus, MsgStatus1}, + State1 #vqstate { q4 = ?QUEUE:in_r(MsgStatus1, Q4a) }) + end; +in_r(MsgStatus, + State = #vqstate { mode = default, q4 = Q4 }) -> + State #vqstate { q4 = ?QUEUE:in_r(MsgStatus, Q4) }; +%% lazy queues +in_r(MsgStatus = #msg_status { seq_id = SeqId }, + State = #vqstate { mode = lazy, q3 = Q3, delta = Delta}) -> + case ?QUEUE:is_empty(Q3) of + true -> + {_MsgStatus1, State1} = + maybe_write_to_disk(true, true, MsgStatus, State), + State2 = stats(ready0, {MsgStatus, none}, State1), + Delta1 = expand_delta(SeqId, Delta), + State2 #vqstate{ delta = Delta1 }; + false -> + State #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3) } + end. + +queue_out(State = #vqstate { mode = default, q4 = Q4 }) -> + case ?QUEUE:out(Q4) of + {empty, _Q4} -> + case fetch_from_q3(State) of + {empty, _State1} = Result -> Result; + {loaded, {MsgStatus, State1}} -> {{value, MsgStatus}, State1} + end; + {{value, MsgStatus}, Q4a} -> + {{value, MsgStatus}, State #vqstate { q4 = Q4a }} + end; +%% lazy queues +queue_out(State = #vqstate { mode = lazy }) -> + case fetch_from_q3(State) of + {empty, _State1} = Result -> Result; + {loaded, {MsgStatus, State1}} -> {{value, MsgStatus}, State1} + end. + +read_msg(#msg_status{msg = undefined, + msg_id = MsgId, + is_persistent = IsPersistent}, State) -> + read_msg(MsgId, IsPersistent, State); +read_msg(#msg_status{msg = Msg}, State) -> + {Msg, State}. 
+ +read_msg(MsgId, IsPersistent, State = #vqstate{msg_store_clients = MSCState, + disk_read_count = Count}) -> + {{ok, Msg = #basic_message {}}, MSCState1} = + msg_store_read(MSCState, IsPersistent, MsgId), + {Msg, State #vqstate {msg_store_clients = MSCState1, + disk_read_count = Count + 1}}. + +stats(Signs, Statuses, State) -> + stats0(expand_signs(Signs), expand_statuses(Statuses), State). + +expand_signs(ready0) -> {0, 0, true}; +expand_signs(lazy_pub) -> {1, 0, true}; +expand_signs({A, B}) -> {A, B, false}. + +expand_statuses({none, A}) -> {false, msg_in_ram(A), A}; +expand_statuses({B, none}) -> {msg_in_ram(B), false, B}; +expand_statuses({lazy, A}) -> {false , false, A}; +expand_statuses({B, A}) -> {msg_in_ram(B), msg_in_ram(A), B}. + +%% In this function at least, we are religious: the variable name +%% contains "Ready" or "Unacked" iff that is what it counts. If +%% neither is present it counts both. +stats0({DeltaReady, DeltaUnacked, ReadyMsgPaged}, + {InRamBefore, InRamAfter, MsgStatus}, + State = #vqstate{len = ReadyCount, + bytes = ReadyBytes, + ram_msg_count = RamReadyCount, + persistent_count = PersistentCount, + unacked_bytes = UnackedBytes, + ram_bytes = RamBytes, + persistent_bytes = PersistentBytes}) -> + S = msg_size(MsgStatus), + DeltaTotal = DeltaReady + DeltaUnacked, + DeltaRam = case {InRamBefore, InRamAfter} of + {false, false} -> 0; + {false, true} -> 1; + {true, false} -> -1; + {true, true} -> 0 + end, + DeltaRamReady = case DeltaReady of + 1 -> one_if(InRamAfter); + -1 -> -one_if(InRamBefore); + 0 when ReadyMsgPaged -> DeltaRam; + 0 -> 0 + end, + DeltaPersistent = DeltaTotal * one_if(MsgStatus#msg_status.is_persistent), + State#vqstate{len = ReadyCount + DeltaReady, + ram_msg_count = RamReadyCount + DeltaRamReady, + persistent_count = PersistentCount + DeltaPersistent, + bytes = ReadyBytes + DeltaReady * S, + unacked_bytes = UnackedBytes + DeltaUnacked * S, + ram_bytes = RamBytes + DeltaRam * S, + persistent_bytes = PersistentBytes + 
DeltaPersistent * S}. + +msg_size(#msg_status{msg_props = #message_properties{size = Size}}) -> Size. + +msg_in_ram(#msg_status{msg = Msg}) -> Msg =/= undefined. + +%% first param: AckRequired +remove(true, MsgStatus = #msg_status { + seq_id = SeqId, + is_delivered = IsDelivered, + index_on_disk = IndexOnDisk }, + State = #vqstate {out_counter = OutCount, + index_state = IndexState}) -> + %% Mark it delivered if necessary + IndexState1 = maybe_write_delivered( + IndexOnDisk andalso not IsDelivered, + SeqId, IndexState), + + State1 = record_pending_ack( + MsgStatus #msg_status { + is_delivered = true }, State), + + State2 = stats({-1, 1}, {MsgStatus, MsgStatus}, State1), + + {SeqId, maybe_update_rates( + State2 #vqstate {out_counter = OutCount + 1, + index_state = IndexState1})}; + +%% This function body has the same behaviour as remove_queue_entries/3 +%% but instead of removing messages based on a ?QUEUE, this removes +%% just one message, the one referenced by the MsgStatus provided. +remove(false, MsgStatus = #msg_status { + seq_id = SeqId, + msg_id = MsgId, + is_persistent = IsPersistent, + is_delivered = IsDelivered, + msg_in_store = MsgInStore, + index_on_disk = IndexOnDisk }, + State = #vqstate {out_counter = OutCount, + index_state = IndexState, + msg_store_clients = MSCState}) -> + %% Mark it delivered if necessary + IndexState1 = maybe_write_delivered( + IndexOnDisk andalso not IsDelivered, + SeqId, IndexState), + + %% Remove from msg_store and queue index, if necessary + case MsgInStore of + true -> ok = msg_store_remove(MSCState, IsPersistent, [MsgId]); + false -> ok + end, + + IndexState2 = + case IndexOnDisk of + true -> rabbit_queue_index:ack([SeqId], IndexState1); + false -> IndexState1 + end, + + State1 = stats({-1, 0}, {MsgStatus, none}, State), + + {undefined, maybe_update_rates( + State1 #vqstate {out_counter = OutCount + 1, + index_state = IndexState2})}. + +%% This function exists as a way to improve dropwhile/2 +%% performance. 
The idea of having this function is to optimise calls
+%% to rabbit_queue_index by batching delivers and acks, instead of
+%% sending them one by one.
+%%
+%% Instead of removing every message as they are popped from the
+%% queue, it first accumulates them and then removes them by calling
+%% remove_queue_entries/3, since the behaviour of
+%% remove_queue_entries/3 when used with
+%% process_delivers_and_acks_fun(deliver_and_ack) is the same as
+%% calling remove(false, MsgStatus, State).
+%%
+%% remove/3 also updates the out_counter in every call, but here we do
+%% it just once at the end.
+remove_by_predicate(Pred, State = #vqstate {out_counter = OutCount}) ->
+    {MsgProps, QAcc, State1} =
+        collect_by_predicate(Pred, ?QUEUE:new(), State),
+    State2 =
+        remove_queue_entries(
+          QAcc, process_delivers_and_acks_fun(deliver_and_ack), State1),
+    %% maybe_update_rates/1 is called in remove/2 for every
+    %% message. Since we update out_counter only once, we call it just
+    %% there.
+    {MsgProps, maybe_update_rates(
+                 State2 #vqstate {
+                   out_counter = OutCount + ?QUEUE:len(QAcc)})}.
+
+%% This function exists as a way to improve fetchwhile/4
+%% performance. The idea of having this function is to optimise calls
+%% to rabbit_queue_index by batching delivers, instead of sending them
+%% one by one.
+%%
+%% Fun is the function passed to fetchwhile/4 that's
+%% applied to every fetched message and used to build the fetchwhile/4
+%% result accumulator FetchAcc.
+fetch_by_predicate(Pred, Fun, FetchAcc,
+                   State = #vqstate {
+                              index_state = IndexState,
+                              out_counter = OutCount}) ->
+    {MsgProps, QAcc, State1} =
+        collect_by_predicate(Pred, ?QUEUE:new(), State),
+
+    {Delivers, FetchAcc1, State2} =
+        process_queue_entries(QAcc, Fun, FetchAcc, State1),
+
+    IndexState1 = rabbit_queue_index:deliver(Delivers, IndexState),
+
+    {MsgProps, FetchAcc1, maybe_update_rates(
+                            State2 #vqstate {
+                              index_state = IndexState1,
+                              out_counter = OutCount + ?QUEUE:len(QAcc)})}.
+
+%% We try to do here the same as what remove(true, State) does but
+%% processing several messages at the same time. The idea is to
+%% optimize rabbit_queue_index:deliver/2 calls by sending a list of
+%% SeqIds instead of one by one, thus process_queue_entries1 will
+%% accumulate the required deliveries, will record_pending_ack for
+%% each message, and will update stats, like remove/2 does.
+%%
+%% For the meaning of Fun and FetchAcc arguments see
+%% fetch_by_predicate/4 above.
+process_queue_entries(Q, Fun, FetchAcc, State) ->
+    ?QUEUE:foldl(fun (MsgStatus, Acc) ->
+                         process_queue_entries1(MsgStatus, Fun, Acc)
+                 end,
+                 {[], FetchAcc, State}, Q).
+
+process_queue_entries1(
+  #msg_status { seq_id = SeqId, is_delivered = IsDelivered,
+                index_on_disk = IndexOnDisk} = MsgStatus,
+  Fun,
+  {Delivers, FetchAcc, State}) ->
+    {Msg, State1} = read_msg(MsgStatus, State),
+    State2 = record_pending_ack(
+               MsgStatus #msg_status {
+                 is_delivered = true }, State1),
+    {cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers),
+     Fun(Msg, SeqId, FetchAcc),
+     stats({-1, 1}, {MsgStatus, MsgStatus}, State2)}.
+
+collect_by_predicate(Pred, QAcc, State) ->
+    case queue_out(State) of
+        {empty, State1} ->
+            {undefined, QAcc, State1};
+        {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} ->
+            case Pred(MsgProps) of
+                true  -> collect_by_predicate(Pred, ?QUEUE:in(MsgStatus, QAcc),
+                                              State1);
+                false -> {MsgProps, QAcc, in_r(MsgStatus, State1)}
+            end
+    end.
+
+%%----------------------------------------------------------------------------
+%% Helpers for Public API purge/1 function
+%%----------------------------------------------------------------------------
+
+%% The difference between purge_when_pending_acks/1
+%% vs. purge_and_index_reset/1 is that the first one issues a deliver
+%% and an ack to the queue index for every message that's being
+%% removed, while the latter just resets the queue index state.
+purge_when_pending_acks(State) ->
+    State1 = purge1(process_delivers_and_acks_fun(deliver_and_ack), State),
+    a(State1).
+
+purge_and_index_reset(State) ->
+    State1 = purge1(process_delivers_and_acks_fun(none), State),
+    a(reset_qi_state(State1)).
+
+%% This function removes messages from each of {q1, q2, q3, q4}.
+%%
+%% With remove_queue_entries/3 q1 and q4 are emptied, while q2 and q3
+%% are specially handled by purge_betas_and_deltas/2.
+%%
+%% purge_betas_and_deltas/2 loads messages from the queue index,
+%% filling up q3 and in some cases moving messages from q2 to q3 while
+%% resetting q2 to an empty queue (see maybe_deltas_to_betas/2). The
+%% messages loaded into q3 are removed by calling
+%% remove_queue_entries/3 until there are no more messages to be read
+%% from the queue index. Messages are read in batches from the queue
+%% index.
+purge1(AfterFun, State = #vqstate { q4 = Q4}) ->
+    State1 = remove_queue_entries(Q4, AfterFun, State),
+
+    State2 = #vqstate {q1 = Q1} =
+        purge_betas_and_deltas(AfterFun, State1#vqstate{q4 = ?QUEUE:new()}),
+
+    State3 = remove_queue_entries(Q1, AfterFun, State2),
+
+    a(State3#vqstate{q1 = ?QUEUE:new()}).
+
+reset_qi_state(State = #vqstate{index_state = IndexState}) ->
+    State#vqstate{index_state =
+                      rabbit_queue_index:reset_state(IndexState)}.
+
+is_pending_ack_empty(State) ->
+    count_pending_acks(State) =:= 0.
+
+count_pending_acks(#vqstate { ram_pending_ack  = RPA,
+                              disk_pending_ack = DPA,
+                              qi_pending_ack   = QPA }) ->
+    gb_trees:size(RPA) + gb_trees:size(DPA) + gb_trees:size(QPA).
+ +purge_betas_and_deltas(DelsAndAcksFun, State = #vqstate { mode = Mode }) -> + State0 = #vqstate { q3 = Q3 } = + case Mode of + lazy -> maybe_deltas_to_betas(DelsAndAcksFun, State); + _ -> State + end, + + case ?QUEUE:is_empty(Q3) of + true -> State0; + false -> State1 = remove_queue_entries(Q3, DelsAndAcksFun, State0), + purge_betas_and_deltas(DelsAndAcksFun, + maybe_deltas_to_betas( + DelsAndAcksFun, + State1#vqstate{q3 = ?QUEUE:new()})) + end. + +remove_queue_entries(Q, DelsAndAcksFun, + State = #vqstate{msg_store_clients = MSCState}) -> + {MsgIdsByStore, Delivers, Acks, State1} = + ?QUEUE:foldl(fun remove_queue_entries1/2, + {orddict:new(), [], [], State}, Q), + remove_msgs_by_id(MsgIdsByStore, MSCState), + DelsAndAcksFun(Delivers, Acks, State1). + +remove_queue_entries1( + #msg_status { msg_id = MsgId, seq_id = SeqId, is_delivered = IsDelivered, + msg_in_store = MsgInStore, index_on_disk = IndexOnDisk, + is_persistent = IsPersistent} = MsgStatus, + {MsgIdsByStore, Delivers, Acks, State}) -> + {case MsgInStore of + true -> rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore); + false -> MsgIdsByStore + end, + cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers), + cons_if(IndexOnDisk, SeqId, Acks), + stats({-1, 0}, {MsgStatus, none}, State)}. + +process_delivers_and_acks_fun(deliver_and_ack) -> + fun (Delivers, Acks, State = #vqstate { index_state = IndexState }) -> + IndexState1 = + rabbit_queue_index:ack( + Acks, rabbit_queue_index:deliver(Delivers, IndexState)), + State #vqstate { index_state = IndexState1 } + end; +process_delivers_and_acks_fun(_) -> + fun (_, _, State) -> + State + end. 
+ +%%---------------------------------------------------------------------------- +%% Internal gubbins for publishing +%%---------------------------------------------------------------------------- + +publish1(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId }, + MsgProps = #message_properties { needs_confirming = NeedsConfirming }, + IsDelivered, _ChPid, _Flow, PersistFun, + State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4, + mode = default, + qi_embed_msgs_below = IndexMaxSize, + next_seq_id = SeqId, + in_counter = InCount, + durable = IsDurable, + unconfirmed = UC }) -> + IsPersistent1 = IsDurable andalso IsPersistent, + MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize), + {MsgStatus1, State1} = PersistFun(false, false, MsgStatus, State), + State2 = case ?QUEUE:is_empty(Q3) of + false -> State1 #vqstate { q1 = ?QUEUE:in(m(MsgStatus1), Q1) }; + true -> State1 #vqstate { q4 = ?QUEUE:in(m(MsgStatus1), Q4) } + end, + InCount1 = InCount + 1, + UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC), + stats({1, 0}, {none, MsgStatus1}, + State2#vqstate{ next_seq_id = SeqId + 1, + in_counter = InCount1, + unconfirmed = UC1 }); +publish1(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId }, + MsgProps = #message_properties { needs_confirming = NeedsConfirming }, + IsDelivered, _ChPid, _Flow, PersistFun, + State = #vqstate { mode = lazy, + qi_embed_msgs_below = IndexMaxSize, + next_seq_id = SeqId, + in_counter = InCount, + durable = IsDurable, + unconfirmed = UC, + delta = Delta }) -> + IsPersistent1 = IsDurable andalso IsPersistent, + MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize), + {MsgStatus1, State1} = PersistFun(true, true, MsgStatus, State), + Delta1 = expand_delta(SeqId, Delta), + UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC), + stats(lazy_pub, {lazy, m(MsgStatus1)}, + State1#vqstate{ delta = Delta1, + next_seq_id = SeqId + 1, + in_counter = InCount + 1, + 
unconfirmed = UC1 }). + +batch_publish1({Msg, MsgProps, IsDelivered}, {ChPid, Flow, State}) -> + {ChPid, Flow, publish1(Msg, MsgProps, IsDelivered, ChPid, Flow, + fun maybe_prepare_write_to_disk/4, State)}. + +publish_delivered1(Msg = #basic_message { is_persistent = IsPersistent, + id = MsgId }, + MsgProps = #message_properties { + needs_confirming = NeedsConfirming }, + _ChPid, _Flow, PersistFun, + State = #vqstate { mode = default, + qi_embed_msgs_below = IndexMaxSize, + next_seq_id = SeqId, + out_counter = OutCount, + in_counter = InCount, + durable = IsDurable, + unconfirmed = UC }) -> + IsPersistent1 = IsDurable andalso IsPersistent, + MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize), + {MsgStatus1, State1} = PersistFun(false, false, MsgStatus, State), + State2 = record_pending_ack(m(MsgStatus1), State1), + UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC), + State3 = stats({0, 1}, {none, MsgStatus1}, + State2 #vqstate { next_seq_id = SeqId + 1, + out_counter = OutCount + 1, + in_counter = InCount + 1, + unconfirmed = UC1 }), + {SeqId, State3}; +publish_delivered1(Msg = #basic_message { is_persistent = IsPersistent, + id = MsgId }, + MsgProps = #message_properties { + needs_confirming = NeedsConfirming }, + _ChPid, _Flow, PersistFun, + State = #vqstate { mode = lazy, + qi_embed_msgs_below = IndexMaxSize, + next_seq_id = SeqId, + out_counter = OutCount, + in_counter = InCount, + durable = IsDurable, + unconfirmed = UC }) -> + IsPersistent1 = IsDurable andalso IsPersistent, + MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize), + {MsgStatus1, State1} = PersistFun(true, true, MsgStatus, State), + State2 = record_pending_ack(m(MsgStatus1), State1), + UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC), + State3 = stats({0, 1}, {none, MsgStatus1}, + State2 #vqstate { next_seq_id = SeqId + 1, + out_counter = OutCount + 1, + in_counter = InCount + 1, + unconfirmed = UC1 }), + {SeqId, State3}. 
+ +batch_publish_delivered1({Msg, MsgProps}, {ChPid, Flow, SeqIds, State}) -> + {SeqId, State1} = + publish_delivered1(Msg, MsgProps, ChPid, Flow, + fun maybe_prepare_write_to_disk/4, + State), + {ChPid, Flow, [SeqId | SeqIds], State1}. + +maybe_write_msg_to_disk(_Force, MsgStatus = #msg_status { + msg_in_store = true }, State) -> + {MsgStatus, State}; +maybe_write_msg_to_disk(Force, MsgStatus = #msg_status { + msg = Msg, msg_id = MsgId, + is_persistent = IsPersistent }, + State = #vqstate{ msg_store_clients = MSCState, + disk_write_count = Count}) + when Force orelse IsPersistent -> + case persist_to(MsgStatus) of + msg_store -> ok = msg_store_write(MSCState, IsPersistent, MsgId, + prepare_to_store(Msg)), + {MsgStatus#msg_status{msg_in_store = true}, + State#vqstate{disk_write_count = Count + 1}}; + queue_index -> {MsgStatus, State} + end; +maybe_write_msg_to_disk(_Force, MsgStatus, State) -> + {MsgStatus, State}. + +%% Due to certain optimizations made inside +%% rabbit_queue_index:pre_publish/7 we need to have two separate +%% functions for index persistence. This one is only used when paging +%% during memory pressure. We didn't want to modify +%% maybe_write_index_to_disk/3 because that function is used in other +%% places. 
+maybe_batch_write_index_to_disk(_Force, + MsgStatus = #msg_status { + index_on_disk = true }, State) -> + {MsgStatus, State}; +maybe_batch_write_index_to_disk(Force, + MsgStatus = #msg_status { + msg = Msg, + msg_id = MsgId, + seq_id = SeqId, + is_persistent = IsPersistent, + is_delivered = IsDelivered, + msg_props = MsgProps}, + State = #vqstate { + target_ram_count = TargetRamCount, + disk_write_count = DiskWriteCount, + index_state = IndexState}) + when Force orelse IsPersistent -> + {MsgOrId, DiskWriteCount1} = + case persist_to(MsgStatus) of + msg_store -> {MsgId, DiskWriteCount}; + queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1} + end, + IndexState1 = rabbit_queue_index:pre_publish( + MsgOrId, SeqId, MsgProps, IsPersistent, IsDelivered, + TargetRamCount, IndexState), + {MsgStatus#msg_status{index_on_disk = true}, + State#vqstate{index_state = IndexState1, + disk_write_count = DiskWriteCount1}}; +maybe_batch_write_index_to_disk(_Force, MsgStatus, State) -> + {MsgStatus, State}. 
+ +maybe_write_index_to_disk(_Force, MsgStatus = #msg_status { + index_on_disk = true }, State) -> + {MsgStatus, State}; +maybe_write_index_to_disk(Force, MsgStatus = #msg_status { + msg = Msg, + msg_id = MsgId, + seq_id = SeqId, + is_persistent = IsPersistent, + is_delivered = IsDelivered, + msg_props = MsgProps}, + State = #vqstate{target_ram_count = TargetRamCount, + disk_write_count = DiskWriteCount, + index_state = IndexState}) + when Force orelse IsPersistent -> + {MsgOrId, DiskWriteCount1} = + case persist_to(MsgStatus) of + msg_store -> {MsgId, DiskWriteCount}; + queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1} + end, + IndexState1 = rabbit_queue_index:publish( + MsgOrId, SeqId, MsgProps, IsPersistent, TargetRamCount, + IndexState), + IndexState2 = maybe_write_delivered(IsDelivered, SeqId, IndexState1), + {MsgStatus#msg_status{index_on_disk = true}, + State#vqstate{index_state = IndexState2, + disk_write_count = DiskWriteCount1}}; + +maybe_write_index_to_disk(_Force, MsgStatus, State) -> + {MsgStatus, State}. + +maybe_write_to_disk(ForceMsg, ForceIndex, MsgStatus, State) -> + {MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State), + maybe_write_index_to_disk(ForceIndex, MsgStatus1, State1). + +maybe_prepare_write_to_disk(ForceMsg, ForceIndex, MsgStatus, State) -> + {MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State), + maybe_batch_write_index_to_disk(ForceIndex, MsgStatus1, State1). + +determine_persist_to(#basic_message{ + content = #content{properties = Props, + properties_bin = PropsBin}}, + #message_properties{size = BodySize}, + IndexMaxSize) -> + %% The >= is so that you can set the env to 0 and never persist + %% to the index. + %% + %% We want this to be fast, so we avoid size(term_to_binary()) + %% here, or using the term size estimation from truncate.erl, both + %% of which are too slow. So instead, if the message body size + %% goes over the limit then we avoid any other checks. 
+ %% + %% If it doesn't we need to decide if the properties will push + %% it past the limit. If we have the encoded properties (usual + %% case) we can just check their size. If we don't (message came + %% via the direct client), we make a guess based on the number of + %% headers. + case BodySize >= IndexMaxSize of + true -> msg_store; + false -> Est = case is_binary(PropsBin) of + true -> BodySize + size(PropsBin); + false -> #'P_basic'{headers = Hs} = Props, + case Hs of + undefined -> 0; + _ -> length(Hs) + end * ?HEADER_GUESS_SIZE + BodySize + end, + case Est >= IndexMaxSize of + true -> msg_store; + false -> queue_index + end + end. + +persist_to(#msg_status{persist_to = To}) -> To. + +prepare_to_store(Msg) -> + Msg#basic_message{ + %% don't persist any recoverable decoded properties + content = rabbit_binary_parser:clear_decoded_content( + Msg #basic_message.content)}. + +%%---------------------------------------------------------------------------- +%% Internal gubbins for acks +%%---------------------------------------------------------------------------- + +record_pending_ack(#msg_status { seq_id = SeqId } = MsgStatus, + State = #vqstate { ram_pending_ack = RPA, + disk_pending_ack = DPA, + qi_pending_ack = QPA, + ack_in_counter = AckInCount}) -> + Insert = fun (Tree) -> gb_trees:insert(SeqId, MsgStatus, Tree) end, + {RPA1, DPA1, QPA1} = + case {msg_in_ram(MsgStatus), persist_to(MsgStatus)} of + {false, _} -> {RPA, Insert(DPA), QPA}; + {_, queue_index} -> {RPA, DPA, Insert(QPA)}; + {_, msg_store} -> {Insert(RPA), DPA, QPA} + end, + State #vqstate { ram_pending_ack = RPA1, + disk_pending_ack = DPA1, + qi_pending_ack = QPA1, + ack_in_counter = AckInCount + 1}. + +lookup_pending_ack(SeqId, #vqstate { ram_pending_ack = RPA, + disk_pending_ack = DPA, + qi_pending_ack = QPA}) -> + case gb_trees:lookup(SeqId, RPA) of + {value, V} -> V; + none -> case gb_trees:lookup(SeqId, DPA) of + {value, V} -> V; + none -> gb_trees:get(SeqId, QPA) + end + end. 
+ +%% First parameter = UpdateStats +remove_pending_ack(true, SeqId, State) -> + {MsgStatus, State1} = remove_pending_ack(false, SeqId, State), + {MsgStatus, stats({0, -1}, {MsgStatus, none}, State1)}; +remove_pending_ack(false, SeqId, State = #vqstate{ram_pending_ack = RPA, + disk_pending_ack = DPA, + qi_pending_ack = QPA}) -> + case gb_trees:lookup(SeqId, RPA) of + {value, V} -> RPA1 = gb_trees:delete(SeqId, RPA), + {V, State #vqstate { ram_pending_ack = RPA1 }}; + none -> case gb_trees:lookup(SeqId, DPA) of + {value, V} -> + DPA1 = gb_trees:delete(SeqId, DPA), + {V, State#vqstate{disk_pending_ack = DPA1}}; + none -> + QPA1 = gb_trees:delete(SeqId, QPA), + {gb_trees:get(SeqId, QPA), + State#vqstate{qi_pending_ack = QPA1}} + end + end. + +purge_pending_ack(KeepPersistent, + State = #vqstate { index_state = IndexState, + msg_store_clients = MSCState }) -> + {IndexOnDiskSeqIds, MsgIdsByStore, State1} = purge_pending_ack1(State), + case KeepPersistent of + true -> remove_transient_msgs_by_id(MsgIdsByStore, MSCState), + State1; + false -> IndexState1 = + rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState), + remove_msgs_by_id(MsgIdsByStore, MSCState), + State1 #vqstate { index_state = IndexState1 } + end. + +purge_pending_ack_delete_and_terminate( + State = #vqstate { index_state = IndexState, + msg_store_clients = MSCState }) -> + {_, MsgIdsByStore, State1} = purge_pending_ack1(State), + IndexState1 = rabbit_queue_index:delete_and_terminate(IndexState), + remove_msgs_by_id(MsgIdsByStore, MSCState), + State1 #vqstate { index_state = IndexState1 }. 
+ +purge_pending_ack1(State = #vqstate { ram_pending_ack = RPA, + disk_pending_ack = DPA, + qi_pending_ack = QPA }) -> + F = fun (_SeqId, MsgStatus, Acc) -> accumulate_ack(MsgStatus, Acc) end, + {IndexOnDiskSeqIds, MsgIdsByStore, _AllMsgIds} = + rabbit_misc:gb_trees_fold( + F, rabbit_misc:gb_trees_fold( + F, rabbit_misc:gb_trees_fold( + F, accumulate_ack_init(), RPA), DPA), QPA), + State1 = State #vqstate { ram_pending_ack = gb_trees:empty(), + disk_pending_ack = gb_trees:empty(), + qi_pending_ack = gb_trees:empty()}, + {IndexOnDiskSeqIds, MsgIdsByStore, State1}. + +%% MsgIdsByStore is an orddict with two keys: +%% +%% true: holds a list of Persistent Message Ids. +%% false: holds a list of Transient Message Ids. +%% +%% When we call orddict:to_list/1 we get two sets of msg ids, where +%% IsPersistent is either true for persistent messages or false for +%% transient ones. The msg_store_remove/3 function takes this boolean +%% flag to determine from which store the messages should be removed +%% from. +remove_msgs_by_id(MsgIdsByStore, MSCState) -> + [ok = msg_store_remove(MSCState, IsPersistent, MsgIds) + || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)]. + +remove_transient_msgs_by_id(MsgIdsByStore, MSCState) -> + case orddict:find(false, MsgIdsByStore) of + error -> ok; + {ok, MsgIds} -> ok = msg_store_remove(MSCState, false, MsgIds) + end. + +accumulate_ack_init() -> {[], orddict:new(), []}. + +accumulate_ack(#msg_status { seq_id = SeqId, + msg_id = MsgId, + is_persistent = IsPersistent, + msg_in_store = MsgInStore, + index_on_disk = IndexOnDisk }, + {IndexOnDiskSeqIdsAcc, MsgIdsByStore, AllMsgIds}) -> + {cons_if(IndexOnDisk, SeqId, IndexOnDiskSeqIdsAcc), + case MsgInStore of + true -> rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore); + false -> MsgIdsByStore + end, + [MsgId | AllMsgIds]}. 
+ +%%---------------------------------------------------------------------------- +%% Internal plumbing for confirms (aka publisher acks) +%%---------------------------------------------------------------------------- + +record_confirms(MsgIdSet, State = #vqstate { msgs_on_disk = MOD, + msg_indices_on_disk = MIOD, + unconfirmed = UC, + confirmed = C }) -> + State #vqstate { + msgs_on_disk = rabbit_misc:gb_sets_difference(MOD, MsgIdSet), + msg_indices_on_disk = rabbit_misc:gb_sets_difference(MIOD, MsgIdSet), + unconfirmed = rabbit_misc:gb_sets_difference(UC, MsgIdSet), + confirmed = gb_sets:union(C, MsgIdSet) }. + +msgs_written_to_disk(Callback, MsgIdSet, ignored) -> + Callback(?MODULE, + fun (?MODULE, State) -> record_confirms(MsgIdSet, State) end); +msgs_written_to_disk(Callback, MsgIdSet, written) -> + Callback(?MODULE, + fun (?MODULE, State = #vqstate { msgs_on_disk = MOD, + msg_indices_on_disk = MIOD, + unconfirmed = UC }) -> + Confirmed = gb_sets:intersection(UC, MsgIdSet), + record_confirms(gb_sets:intersection(MsgIdSet, MIOD), + State #vqstate { + msgs_on_disk = + gb_sets:union(MOD, Confirmed) }) + end). + +msg_indices_written_to_disk(Callback, MsgIdSet) -> + Callback(?MODULE, + fun (?MODULE, State = #vqstate { msgs_on_disk = MOD, + msg_indices_on_disk = MIOD, + unconfirmed = UC }) -> + Confirmed = gb_sets:intersection(UC, MsgIdSet), + record_confirms(gb_sets:intersection(MsgIdSet, MOD), + State #vqstate { + msg_indices_on_disk = + gb_sets:union(MIOD, Confirmed) }) + end). + +msgs_and_indices_written_to_disk(Callback, MsgIdSet) -> + Callback(?MODULE, + fun (?MODULE, State) -> record_confirms(MsgIdSet, State) end). 
+ +%%---------------------------------------------------------------------------- +%% Internal plumbing for requeue +%%---------------------------------------------------------------------------- + +publish_alpha(#msg_status { msg = undefined } = MsgStatus, State) -> + {Msg, State1} = read_msg(MsgStatus, State), + MsgStatus1 = MsgStatus#msg_status { msg = Msg }, + {MsgStatus1, stats({1, -1}, {MsgStatus, MsgStatus1}, State1)}; +publish_alpha(MsgStatus, State) -> + {MsgStatus, stats({1, -1}, {MsgStatus, MsgStatus}, State)}. + +publish_beta(MsgStatus, State) -> + {MsgStatus1, State1} = maybe_write_to_disk(true, false, MsgStatus, State), + MsgStatus2 = m(trim_msg_status(MsgStatus1)), + {MsgStatus2, stats({1, -1}, {MsgStatus, MsgStatus2}, State1)}. + +%% Rebuild queue, inserting sequence ids to maintain ordering +queue_merge(SeqIds, Q, MsgIds, Limit, PubFun, State) -> + queue_merge(SeqIds, Q, ?QUEUE:new(), MsgIds, + Limit, PubFun, State). + +queue_merge([SeqId | Rest] = SeqIds, Q, Front, MsgIds, + Limit, PubFun, State) + when Limit == undefined orelse SeqId < Limit -> + case ?QUEUE:out(Q) of + {{value, #msg_status { seq_id = SeqIdQ } = MsgStatus}, Q1} + when SeqIdQ < SeqId -> + %% enqueue from the remaining queue + queue_merge(SeqIds, Q1, ?QUEUE:in(MsgStatus, Front), MsgIds, + Limit, PubFun, State); + {_, _Q1} -> + %% enqueue from the remaining list of sequence ids + {MsgStatus, State1} = msg_from_pending_ack(SeqId, State), + {#msg_status { msg_id = MsgId } = MsgStatus1, State2} = + PubFun(MsgStatus, State1), + queue_merge(Rest, Q, ?QUEUE:in(MsgStatus1, Front), [MsgId | MsgIds], + Limit, PubFun, State2) + end; +queue_merge(SeqIds, Q, Front, MsgIds, + _Limit, _PubFun, State) -> + {SeqIds, ?QUEUE:join(Front, Q), MsgIds, State}. 
+ +delta_merge([], Delta, MsgIds, State) -> + {Delta, MsgIds, State}; +delta_merge(SeqIds, Delta, MsgIds, State) -> + lists:foldl(fun (SeqId, {Delta0, MsgIds0, State0}) -> + {#msg_status { msg_id = MsgId } = MsgStatus, State1} = + msg_from_pending_ack(SeqId, State0), + {_MsgStatus, State2} = + maybe_write_to_disk(true, true, MsgStatus, State1), + {expand_delta(SeqId, Delta0), [MsgId | MsgIds0], + stats({1, -1}, {MsgStatus, none}, State2)} + end, {Delta, MsgIds, State}, SeqIds). + +%% Mostly opposite of record_pending_ack/2 +msg_from_pending_ack(SeqId, State) -> + {#msg_status { msg_props = MsgProps } = MsgStatus, State1} = + remove_pending_ack(false, SeqId, State), + {MsgStatus #msg_status { + msg_props = MsgProps #message_properties { needs_confirming = false } }, + State1}. + +beta_limit(Q) -> + case ?QUEUE:peek(Q) of + {value, #msg_status { seq_id = SeqId }} -> SeqId; + empty -> undefined + end. + +delta_limit(?BLANK_DELTA_PATTERN(_X)) -> undefined; +delta_limit(#delta { start_seq_id = StartSeqId }) -> StartSeqId. + +%%---------------------------------------------------------------------------- +%% Iterator +%%---------------------------------------------------------------------------- + +ram_ack_iterator(State) -> + {ack, gb_trees:iterator(State#vqstate.ram_pending_ack)}. + +disk_ack_iterator(State) -> + {ack, gb_trees:iterator(State#vqstate.disk_pending_ack)}. + +qi_ack_iterator(State) -> + {ack, gb_trees:iterator(State#vqstate.qi_pending_ack)}. + +msg_iterator(State) -> istate(start, State). + +istate(start, State) -> {q4, State#vqstate.q4, State}; +istate(q4, State) -> {q3, State#vqstate.q3, State}; +istate(q3, State) -> {delta, State#vqstate.delta, State}; +istate(delta, State) -> {q2, State#vqstate.q2, State}; +istate(q2, State) -> {q1, State#vqstate.q1, State}; +istate(q1, _State) -> done. 
+ +next({ack, It}, IndexState) -> + case gb_trees:next(It) of + none -> {empty, IndexState}; + {_SeqId, MsgStatus, It1} -> Next = {ack, It1}, + {value, MsgStatus, true, Next, IndexState} + end; +next(done, IndexState) -> {empty, IndexState}; +next({delta, #delta{start_seq_id = SeqId, + end_seq_id = SeqId}, State}, IndexState) -> + next(istate(delta, State), IndexState); +next({delta, #delta{start_seq_id = SeqId, + end_seq_id = SeqIdEnd} = Delta, State}, IndexState) -> + SeqIdB = rabbit_queue_index:next_segment_boundary(SeqId), + SeqId1 = lists:min([SeqIdB, SeqIdEnd]), + {List, IndexState1} = rabbit_queue_index:read(SeqId, SeqId1, IndexState), + next({delta, Delta#delta{start_seq_id = SeqId1}, List, State}, IndexState1); +next({delta, Delta, [], State}, IndexState) -> + next({delta, Delta, State}, IndexState); +next({delta, Delta, [{_, SeqId, _, _, _} = M | Rest], State}, IndexState) -> + case is_msg_in_pending_acks(SeqId, State) of + false -> Next = {delta, Delta, Rest, State}, + {value, beta_msg_status(M), false, Next, IndexState}; + true -> next({delta, Delta, Rest, State}, IndexState) + end; +next({Key, Q, State}, IndexState) -> + case ?QUEUE:out(Q) of + {empty, _Q} -> next(istate(Key, State), IndexState); + {{value, MsgStatus}, QN} -> Next = {Key, QN, State}, + {value, MsgStatus, false, Next, IndexState} + end. + +inext(It, {Its, IndexState}) -> + case next(It, IndexState) of + {empty, IndexState1} -> + {Its, IndexState1}; + {value, MsgStatus1, Unacked, It1, IndexState1} -> + {[{MsgStatus1, Unacked, It1} | Its], IndexState1} + end. 
+ +ifold(_Fun, Acc, [], State) -> + {Acc, State}; +ifold(Fun, Acc, Its, State) -> + [{MsgStatus, Unacked, It} | Rest] = + lists:sort(fun ({#msg_status{seq_id = SeqId1}, _, _}, + {#msg_status{seq_id = SeqId2}, _, _}) -> + SeqId1 =< SeqId2 + end, Its), + {Msg, State1} = read_msg(MsgStatus, State), + case Fun(Msg, MsgStatus#msg_status.msg_props, Unacked, Acc) of + {stop, Acc1} -> + {Acc1, State}; + {cont, Acc1} -> + {Its1, IndexState1} = inext(It, {Rest, State1#vqstate.index_state}), + ifold(Fun, Acc1, Its1, State1#vqstate{index_state = IndexState1}) + end. + +%%---------------------------------------------------------------------------- +%% Phase changes +%%---------------------------------------------------------------------------- + +reduce_memory_use(State = #vqstate { target_ram_count = infinity }) -> + State; +reduce_memory_use(State = #vqstate { + mode = default, + ram_pending_ack = RPA, + ram_msg_count = RamMsgCount, + target_ram_count = TargetRamCount, + io_batch_size = IoBatchSize, + rates = #rates { in = AvgIngress, + out = AvgEgress, + ack_in = AvgAckIngress, + ack_out = AvgAckEgress } }) -> + + State1 = #vqstate { q2 = Q2, q3 = Q3 } = + case chunk_size(RamMsgCount + gb_trees:size(RPA), TargetRamCount) of + 0 -> State; + %% Reduce memory of pending acks and alphas. The order is + %% determined based on which is growing faster. Whichever + %% comes second may very well get a quota of 0 if the + %% first manages to push out the max number of messages. 
+ S1 -> Funs = case ((AvgAckIngress - AvgAckEgress) > + (AvgIngress - AvgEgress)) of + true -> [fun limit_ram_acks/2, + fun push_alphas_to_betas/2]; + false -> [fun push_alphas_to_betas/2, + fun limit_ram_acks/2] + end, + {_, State2} = lists:foldl(fun (ReduceFun, {QuotaN, StateN}) -> + ReduceFun(QuotaN, StateN) + end, {S1, State}, Funs), + State2 + end, + + State3 = + case chunk_size(?QUEUE:len(Q2) + ?QUEUE:len(Q3), + permitted_beta_count(State1)) of + S2 when S2 >= IoBatchSize -> + %% There is an implicit, but subtle, upper bound here. We + %% may shuffle a lot of messages from Q2/3 into delta, but + %% the number of these that require any disk operation, + %% namely index writing, i.e. messages that are genuine + %% betas and not gammas, is bounded by the credit_flow + %% limiting of the alpha->beta conversion above. + push_betas_to_deltas(S2, State1); + _ -> + State1 + end, + %% See rabbitmq-server-290 for the reasons behind this GC call. + garbage_collect(), + State3; +%% When using lazy queues, there are no alphas, so we don't need to +%% call push_alphas_to_betas/2. +reduce_memory_use(State = #vqstate { + mode = lazy, + ram_pending_ack = RPA, + ram_msg_count = RamMsgCount, + target_ram_count = TargetRamCount }) -> + State1 = #vqstate { q3 = Q3 } = + case chunk_size(RamMsgCount + gb_trees:size(RPA), TargetRamCount) of + 0 -> State; + S1 -> {_, State2} = limit_ram_acks(S1, State), + State2 + end, + + State3 = + case chunk_size(?QUEUE:len(Q3), + permitted_beta_count(State1)) of + 0 -> + State1; + S2 -> + push_betas_to_deltas(S2, State1) + end, + garbage_collect(), + State3. 
+ +limit_ram_acks(0, State) -> + {0, ui(State)}; +limit_ram_acks(Quota, State = #vqstate { ram_pending_ack = RPA, + disk_pending_ack = DPA }) -> + case gb_trees:is_empty(RPA) of + true -> + {Quota, ui(State)}; + false -> + {SeqId, MsgStatus, RPA1} = gb_trees:take_largest(RPA), + {MsgStatus1, State1} = + maybe_prepare_write_to_disk(true, false, MsgStatus, State), + MsgStatus2 = m(trim_msg_status(MsgStatus1)), + DPA1 = gb_trees:insert(SeqId, MsgStatus2, DPA), + limit_ram_acks(Quota - 1, + stats({0, 0}, {MsgStatus, MsgStatus2}, + State1 #vqstate { ram_pending_ack = RPA1, + disk_pending_ack = DPA1 })) + end. + +permitted_beta_count(#vqstate { len = 0 }) -> + infinity; +permitted_beta_count(#vqstate { mode = lazy, + target_ram_count = TargetRamCount}) -> + TargetRamCount; +permitted_beta_count(#vqstate { target_ram_count = 0, q3 = Q3 }) -> + lists:min([?QUEUE:len(Q3), rabbit_queue_index:next_segment_boundary(0)]); +permitted_beta_count(#vqstate { q1 = Q1, + q4 = Q4, + target_ram_count = TargetRamCount, + len = Len }) -> + BetaDelta = Len - ?QUEUE:len(Q1) - ?QUEUE:len(Q4), + lists:max([rabbit_queue_index:next_segment_boundary(0), + BetaDelta - ((BetaDelta * BetaDelta) div + (BetaDelta + TargetRamCount))]). + +chunk_size(Current, Permitted) + when Permitted =:= infinity orelse Permitted >= Current -> + 0; +chunk_size(Current, Permitted) -> + Current - Permitted. + +fetch_from_q3(State = #vqstate { mode = default, + q1 = Q1, + q2 = Q2, + delta = #delta { count = DeltaCount }, + q3 = Q3, + q4 = Q4 }) -> + case ?QUEUE:out(Q3) of + {empty, _Q3} -> + {empty, State}; + {{value, MsgStatus}, Q3a} -> + State1 = State #vqstate { q3 = Q3a }, + State2 = case {?QUEUE:is_empty(Q3a), 0 == DeltaCount} of + {true, true} -> + %% q3 is now empty, it wasn't before; + %% delta is still empty. So q2 must be + %% empty, and we know q4 is empty + %% otherwise we wouldn't be loading from + %% q3. As such, we can just set q4 to Q1. 
+ true = ?QUEUE:is_empty(Q2), %% ASSERTION + true = ?QUEUE:is_empty(Q4), %% ASSERTION + State1 #vqstate { q1 = ?QUEUE:new(), q4 = Q1 }; + {true, false} -> + maybe_deltas_to_betas(State1); + {false, _} -> + %% q3 still isn't empty, we've not + %% touched delta, so the invariants + %% between q1, q2, delta and q3 are + %% maintained + State1 + end, + {loaded, {MsgStatus, State2}} + end; +%% lazy queues +fetch_from_q3(State = #vqstate { mode = lazy, + delta = #delta { count = DeltaCount }, + q3 = Q3 }) -> + case ?QUEUE:out(Q3) of + {empty, _Q3} when DeltaCount =:= 0 -> + {empty, State}; + {empty, _Q3} -> + fetch_from_q3(maybe_deltas_to_betas(State)); + {{value, MsgStatus}, Q3a} -> + State1 = State #vqstate { q3 = Q3a }, + {loaded, {MsgStatus, State1}} + end. + +maybe_deltas_to_betas(State) -> + AfterFun = process_delivers_and_acks_fun(deliver_and_ack), + maybe_deltas_to_betas(AfterFun, State). + +maybe_deltas_to_betas(_DelsAndAcksFun, + State = #vqstate {delta = ?BLANK_DELTA_PATTERN(X) }) -> + State; +maybe_deltas_to_betas(DelsAndAcksFun, + State = #vqstate { + q2 = Q2, + delta = Delta, + q3 = Q3, + index_state = IndexState, + ram_msg_count = RamMsgCount, + ram_bytes = RamBytes, + disk_read_count = DiskReadCount, + transient_threshold = TransientThreshold }) -> + #delta { start_seq_id = DeltaSeqId, + count = DeltaCount, + end_seq_id = DeltaSeqIdEnd } = Delta, + DeltaSeqId1 = + lists:min([rabbit_queue_index:next_segment_boundary(DeltaSeqId), + DeltaSeqIdEnd]), + {List, IndexState1} = rabbit_queue_index:read(DeltaSeqId, DeltaSeqId1, + IndexState), + {Q3a, RamCountsInc, RamBytesInc, State1} = + betas_from_index_entries(List, TransientThreshold, + DelsAndAcksFun, + State #vqstate { index_state = IndexState1 }), + State2 = State1 #vqstate { ram_msg_count = RamMsgCount + RamCountsInc, + ram_bytes = RamBytes + RamBytesInc, + disk_read_count = DiskReadCount + RamCountsInc }, + case ?QUEUE:len(Q3a) of + 0 -> + %% we ignored every message in the segment due to it being + %% 
transient and below the threshold + maybe_deltas_to_betas( + DelsAndAcksFun, + State2 #vqstate { + delta = d(Delta #delta { start_seq_id = DeltaSeqId1 })}); + Q3aLen -> + Q3b = ?QUEUE:join(Q3, Q3a), + case DeltaCount - Q3aLen of + 0 -> + %% delta is now empty, but it wasn't before, so + %% can now join q2 onto q3 + State2 #vqstate { q2 = ?QUEUE:new(), + delta = ?BLANK_DELTA, + q3 = ?QUEUE:join(Q3b, Q2) }; + N when N > 0 -> + Delta1 = d(#delta { start_seq_id = DeltaSeqId1, + count = N, + end_seq_id = DeltaSeqIdEnd }), + State2 #vqstate { delta = Delta1, + q3 = Q3b } + end + end. + +push_alphas_to_betas(Quota, State) -> + {Quota1, State1} = + push_alphas_to_betas( + fun ?QUEUE:out/1, + fun (MsgStatus, Q1a, + State0 = #vqstate { q3 = Q3, delta = #delta { count = 0 } }) -> + State0 #vqstate { q1 = Q1a, q3 = ?QUEUE:in(MsgStatus, Q3) }; + (MsgStatus, Q1a, State0 = #vqstate { q2 = Q2 }) -> + State0 #vqstate { q1 = Q1a, q2 = ?QUEUE:in(MsgStatus, Q2) } + end, Quota, State #vqstate.q1, State), + {Quota2, State2} = + push_alphas_to_betas( + fun ?QUEUE:out_r/1, + fun (MsgStatus, Q4a, State0 = #vqstate { q3 = Q3 }) -> + State0 #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3), q4 = Q4a } + end, Quota1, State1 #vqstate.q4, State1), + {Quota2, State2}. + +push_alphas_to_betas(_Generator, _Consumer, Quota, _Q, + State = #vqstate { ram_msg_count = RamMsgCount, + target_ram_count = TargetRamCount }) + when Quota =:= 0 orelse + TargetRamCount =:= infinity orelse + TargetRamCount >= RamMsgCount -> + {Quota, ui(State)}; +push_alphas_to_betas(Generator, Consumer, Quota, Q, State) -> + %% We consume credits from the message_store whenever we need to + %% persist a message to disk. See: + %% rabbit_variable_queue:msg_store_write/4. So perhaps the + %% msg_store is trying to throttle down our queue. 
+ case credit_flow:blocked() of + true -> {Quota, ui(State)}; + false -> case Generator(Q) of + {empty, _Q} -> + {Quota, ui(State)}; + {{value, MsgStatus}, Qa} -> + {MsgStatus1, State1} = + maybe_prepare_write_to_disk(true, false, MsgStatus, + State), + MsgStatus2 = m(trim_msg_status(MsgStatus1)), + State2 = stats( + ready0, {MsgStatus, MsgStatus2}, State1), + State3 = Consumer(MsgStatus2, Qa, State2), + push_alphas_to_betas(Generator, Consumer, Quota - 1, + Qa, State3) + end + end. + +push_betas_to_deltas(Quota, State = #vqstate { mode = default, + q2 = Q2, + delta = Delta, + q3 = Q3}) -> + PushState = {Quota, Delta, State}, + {Q3a, PushState1} = push_betas_to_deltas( + fun ?QUEUE:out_r/1, + fun rabbit_queue_index:next_segment_boundary/1, + Q3, PushState), + {Q2a, PushState2} = push_betas_to_deltas( + fun ?QUEUE:out/1, + fun (Q2MinSeqId) -> Q2MinSeqId end, + Q2, PushState1), + {_, Delta1, State1} = PushState2, + State1 #vqstate { q2 = Q2a, + delta = Delta1, + q3 = Q3a }; +%% In the case of lazy queues we want to page as many messages as +%% possible from q3. +push_betas_to_deltas(Quota, State = #vqstate { mode = lazy, + delta = Delta, + q3 = Q3}) -> + PushState = {Quota, Delta, State}, + {Q3a, PushState1} = push_betas_to_deltas( + fun ?QUEUE:out_r/1, + fun (Q2MinSeqId) -> Q2MinSeqId end, + Q3, PushState), + {_, Delta1, State1} = PushState1, + State1 #vqstate { delta = Delta1, + q3 = Q3a }. + + +push_betas_to_deltas(Generator, LimitFun, Q, PushState) -> + case ?QUEUE:is_empty(Q) of + true -> + {Q, PushState}; + false -> + {value, #msg_status { seq_id = MinSeqId }} = ?QUEUE:peek(Q), + {value, #msg_status { seq_id = MaxSeqId }} = ?QUEUE:peek_r(Q), + Limit = LimitFun(MinSeqId), + case MaxSeqId < Limit of + true -> {Q, PushState}; + false -> push_betas_to_deltas1(Generator, Limit, Q, PushState) + end + end. 
+ +push_betas_to_deltas1(_Generator, _Limit, Q, {0, Delta, State}) -> + {Q, {0, Delta, ui(State)}}; +push_betas_to_deltas1(Generator, Limit, Q, {Quota, Delta, State}) -> + case Generator(Q) of + {empty, _Q} -> + {Q, {Quota, Delta, ui(State)}}; + {{value, #msg_status { seq_id = SeqId }}, _Qa} + when SeqId < Limit -> + {Q, {Quota, Delta, ui(State)}}; + {{value, MsgStatus = #msg_status { seq_id = SeqId }}, Qa} -> + {#msg_status { index_on_disk = true }, State1} = + maybe_batch_write_index_to_disk(true, MsgStatus, State), + State2 = stats(ready0, {MsgStatus, none}, State1), + Delta1 = expand_delta(SeqId, Delta), + push_betas_to_deltas1(Generator, Limit, Qa, + {Quota - 1, Delta1, State2}) + end. + +%% Flushes queue index batch caches and updates queue index state. +ui(#vqstate{index_state = IndexState, + target_ram_count = TargetRamCount} = State) -> + IndexState1 = rabbit_queue_index:flush_pre_publish_cache( + TargetRamCount, IndexState), + State#vqstate{index_state = IndexState1}. + +%% Delay +maybe_delay(QPA) -> + case is_timeout_test(gb_trees:values(QPA)) of + true -> receive + %% The queue received an EXIT message, it's probably the + %% node being stopped with "rabbitmqctl stop". Thus, abort + %% the wait and requeue the EXIT message. + {'EXIT', _, shutdown} = ExitMsg -> self() ! ExitMsg, + void + after infinity -> void + end; + _ -> void + end. + +is_timeout_test([]) -> false; +is_timeout_test([#msg_status{ + msg = #basic_message{ + content = #content{ + payload_fragments_rev = PFR}}}|Rem]) -> + case lists:member(?TIMEOUT_TEST_MSG, PFR) of + T = true -> T; + _ -> is_timeout_test(Rem) + end; +is_timeout_test([_|Rem]) -> is_timeout_test(Rem). 
+ +%%---------------------------------------------------------------------------- +%% Upgrading +%%---------------------------------------------------------------------------- + +multiple_routing_keys() -> + transform_storage( + fun ({basic_message, ExchangeName, Routing_Key, Content, + MsgId, Persistent}) -> + {ok, {basic_message, ExchangeName, [Routing_Key], Content, + MsgId, Persistent}}; + (_) -> {error, corrupt_message} + end), + ok. + + +%% Assumes message store is not running +transform_storage(TransformFun) -> + transform_store(?PERSISTENT_MSG_STORE, TransformFun), + transform_store(?TRANSIENT_MSG_STORE, TransformFun). + +transform_store(Store, TransformFun) -> + rabbit_msg_store:force_recovery(rabbit_mnesia:dir(), Store), + rabbit_msg_store:transform_dir(rabbit_mnesia:dir(), Store, TransformFun). diff --git a/test/cluster_rename_SUITE.erl b/test/cluster_rename_SUITE.erl new file mode 100644 index 000000000000..8ce29a6695e7 --- /dev/null +++ b/test/cluster_rename_SUITE.erl @@ -0,0 +1,304 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(cluster_rename_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile(export_all). + +all() -> + [ + {group, cluster_size_2}, + {group, cluster_size_3} + ]. 
+ +groups() -> + [ + {cluster_size_2, [], [ + % XXX post_change_nodename, + abortive_rename, + rename_fail, + rename_twice_fail + ]}, + {cluster_size_3, [], [ + rename_cluster_one_by_one, + rename_cluster_big_bang, + partial_one_by_one, + partial_big_bang + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(cluster_size_2, Config) -> + rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_count, 2} %% Replaced with a list of node names later. + ]); +init_per_group(cluster_size_3, Config) -> + rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_count, 3} %% Replaced with a list of node names later. + ]). + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + ClusterSize = ?config(rmq_nodes_count, Config), + Nodenames = [ + list_to_atom(rabbit_misc:format("~s-~b", [Testcase, I])) + || I <- lists:seq(1, ClusterSize) + ], + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_count, Nodenames}, + {rmq_nodes_clustered, true} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_testcase(Testcase, Config) -> + Config1 = case rabbit_ct_helpers:get_config(Config, save_config) of + undefined -> Config; + C -> C + end, + Config2 = rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config2, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. 
+%% ------------------------------------------------------------------- + +%% Rolling rename of a cluster, each node should do a secondary rename. +rename_cluster_one_by_one(Config) -> + [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs( + Config, nodename), + publish_all(Config, + [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]), + + Config1 = stop_rename_start(Config, Node1, [Node1, jessica]), + Config2 = stop_rename_start(Config1, Node2, [Node2, hazel]), + Config3 = stop_rename_start(Config2, Node3, [Node3, flopsy]), + + [Jessica, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs( + Config3, nodename), + consume_all(Config3, + [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]), + {save_config, Config3}. + +%% Big bang rename of a cluster, Node1 should do a primary rename. +rename_cluster_big_bang(Config) -> + [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config, + nodename), + publish_all(Config, + [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Node3), + ok = rabbit_ct_broker_helpers:stop_node(Config, Node2), + ok = rabbit_ct_broker_helpers:stop_node(Config, Node1), + + Map = [Node1, jessica, Node2, hazel, Node3, flopsy], + Config1 = rename_node(Config, Node1, Map), + Config2 = rename_node(Config1, Node2, Map), + Config3 = rename_node(Config2, Node3, Map), + + [Jessica, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs( + Config3, nodename), + ok = rabbit_ct_broker_helpers:start_node(Config3, Jessica), + ok = rabbit_ct_broker_helpers:start_node(Config3, Hazel), + ok = rabbit_ct_broker_helpers:start_node(Config3, Flopsy), + + consume_all(Config3, + [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]), + {save_config, Config3}. + +%% Here we test that Node1 copes with things being renamed around it. 
+partial_one_by_one(Config) -> + [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config, + nodename), + publish_all(Config, + [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]), + + Config1 = stop_rename_start(Config, Node1, [Node1, jessica]), + Config2 = stop_rename_start(Config1, Node2, [Node2, hazel]), + + [Jessica, Hazel, Node3] = rabbit_ct_broker_helpers:get_node_configs( + Config2, nodename), + consume_all(Config2, + [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Node3, <<"3">>}]), + {save_config, Config2}. + +%% Here we test that Node1 copes with things being renamed around it. +partial_big_bang(Config) -> + [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config, + nodename), + publish_all(Config, + [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Node3), + ok = rabbit_ct_broker_helpers:stop_node(Config, Node2), + ok = rabbit_ct_broker_helpers:stop_node(Config, Node1), + + Map = [Node2, hazel, Node3, flopsy], + Config1 = rename_node(Config, Node2, Map), + Config2 = rename_node(Config1, Node3, Map), + + [Node1, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs(Config2, + nodename), + ok = rabbit_ct_broker_helpers:start_node(Config2, Node1), + ok = rabbit_ct_broker_helpers:start_node(Config2, Hazel), + ok = rabbit_ct_broker_helpers:start_node(Config2, Flopsy), + + consume_all(Config2, + [{Node1, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]), + {save_config, Config2}. + +% XXX %% We should be able to specify the -n parameter on ctl with either +% XXX %% the before or after name for the local node (since in real cases +% XXX %% one might want to invoke the command before or after the hostname +% XXX %% has changed) - usually we test before so here we test after. 
+% XXX post_change_nodename([Node1, _Bigwig]) -> +% XXX publish(Node1, <<"Node1">>), +% XXX +% XXX Bugs1 = rabbit_test_configs:stop_node(Node1), +% XXX Bugs2 = [{nodename, jessica} | proplists:delete(nodename, Bugs1)], +% XXX Jessica0 = rename_node(Bugs2, jessica, [Node1, jessica]), +% XXX Jessica = rabbit_test_configs:start_node(Jessica0), +% XXX +% XXX consume(Jessica, <<"Node1">>), +% XXX stop_all([Jessica]), +% XXX ok. + +%% If we invoke rename but the node name does not actually change, we +%% should roll back. +abortive_rename(Config) -> + Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + publish(Config, Node1, <<"Node1">>), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Node1), + _Config1 = rename_node(Config, Node1, [Node1, jessica]), + ok = rabbit_ct_broker_helpers:start_node(Config, Node1), + + consume(Config, Node1, <<"Node1">>), + ok. + +%% And test some ways the command can fail. +rename_fail(Config) -> + [Node1, Node2] = rabbit_ct_broker_helpers:get_node_configs(Config, + nodename), + ok = rabbit_ct_broker_helpers:stop_node(Config, Node1), + %% Rename from a node that does not exist + ok = rename_node_fail(Config, Node1, [bugzilla, jessica]), + %% Rename to a node which does + ok = rename_node_fail(Config, Node1, [Node1, Node2]), + %% Rename two nodes to the same thing + ok = rename_node_fail(Config, Node1, [Node1, jessica, Node2, jessica]), + %% Rename while impersonating a node not in the cluster + Config1 = rabbit_ct_broker_helpers:set_node_config(Config, Node1, + {nodename, 'rabbit@localhost'}), + ok = rename_node_fail(Config1, Node1, [Node1, jessica]), + ok. + +rename_twice_fail(Config) -> + Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + ok = rabbit_ct_broker_helpers:stop_node(Config, Node1), + Config1 = rename_node(Config, Node1, [Node1, indecisive]), + ok = rename_node_fail(Config, Node1, [indecisive, jessica]), + {save_config, Config1}. 
+ +%% ---------------------------------------------------------------------------- + +stop_rename_start(Config, Nodename, Map) -> + ok = rabbit_ct_broker_helpers:stop_node(Config, Nodename), + Config1 = rename_node(Config, Nodename, Map), + ok = rabbit_ct_broker_helpers:start_node(Config1, Nodename), + Config1. + +rename_node(Config, Nodename, Map) -> + {ok, Config1} = do_rename_node(Config, Nodename, Map), + Config1. + +rename_node_fail(Config, Nodename, Map) -> + error = do_rename_node(Config, Nodename, Map), + ok. + +do_rename_node(Config, Nodename, Map) -> + Map1 = [ + begin + NStr = atom_to_list(N), + case lists:member($@, NStr) of + true -> N; + false -> rabbit_nodes:make({NStr, "localhost"}) + end + end + || N <- Map + ], + Ret = rabbit_ct_broker_helpers:rabbitmqctl(Config, Nodename, + ["rename_cluster_node" | Map1]), + case Ret of + {ok, _} -> + Config1 = update_config_after_rename(Config, Map1), + {ok, Config1}; + {error, _, _} -> + error + end. + +update_config_after_rename(Config, [Old, New | Rest]) -> + Config1 = rabbit_ct_broker_helpers:set_node_config(Config, Old, + {nodename, New}), + update_config_after_rename(Config1, Rest); +update_config_after_rename(Config, []) -> + Config. + +publish(Config, Node, Q) -> + Ch = rabbit_ct_client_helpers:open_channel(Config, Node), + amqp_channel:call(Ch, #'confirm.select'{}), + amqp_channel:call(Ch, #'queue.declare'{queue = Q, durable = true}), + amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q}, + #amqp_msg{props = #'P_basic'{delivery_mode = 2}, + payload = Q}), + amqp_channel:wait_for_confirms(Ch), + rabbit_ct_client_helpers:close_channels_and_connection(Config, Node). + +consume(Config, Node, Q) -> + Ch = rabbit_ct_client_helpers:open_channel(Config, Node), + amqp_channel:call(Ch, #'queue.declare'{queue = Q, durable = true}), + {#'basic.get_ok'{}, #amqp_msg{payload = Q}} = + amqp_channel:call(Ch, #'basic.get'{queue = Q}), + rabbit_ct_client_helpers:close_channels_and_connection(Config, Node). 
+ + +publish_all(Config, Nodes) -> + [publish(Config, Node, Key) || {Node, Key} <- Nodes]. + +consume_all(Config, Nodes) -> + [consume(Config, Node, Key) || {Node, Key} <- Nodes]. + +set_node(Nodename, Cfg) -> + [{nodename, Nodename} | proplists:delete(nodename, Cfg)]. diff --git a/test/clustering_management_SUITE.erl b/test/clustering_management_SUITE.erl new file mode 100644 index 000000000000..00ddfa48a29c --- /dev/null +++ b/test/clustering_management_SUITE.erl @@ -0,0 +1,728 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(clustering_management_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile(export_all). + +-define(LOOP_RECURSION_DELAY, 100). + +all() -> + [ + {group, unclustered}, + {group, clustered} + ]. 
+ +groups() -> + [ + {unclustered, [], [ + {cluster_size_2, [], [ + erlang_config + ]}, + {cluster_size_3, [], [ + join_and_part_cluster, + join_cluster_bad_operations, + join_to_start_interval, + forget_cluster_node, + change_cluster_node_type, + change_cluster_when_node_offline, + update_cluster_nodes, + force_reset_node + ]} + ]}, + {clustered, [], [ + {cluster_size_2, [], [ + forget_removes_things, + reset_removes_things, + forget_offline_removes_things, + force_boot, + status_with_alarm + ]}, + {cluster_size_4, [], [ + forget_promotes_offline_slave + ]} + ]} + ]. + +suite() -> + [ + %% If a test hangs, no need to wait for 30 minutes. + {timetrap, {minutes, 5}} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(unclustered, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]); +init_per_group(clustered, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]); +init_per_group(cluster_size_2, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]); +init_per_group(cluster_size_3, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]); +init_per_group(cluster_size_4, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 4}]). + +end_per_group(_, Config) -> + Config. 
+ +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + ClusterSize = ?config(rmq_nodes_count, Config), + TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, Testcase}, + {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. +%% ------------------------------------------------------------------- + +join_and_part_cluster(Config) -> + [Rabbit, Hare, Bunny] = cluster_members(Config), + assert_not_clustered(Rabbit), + assert_not_clustered(Hare), + assert_not_clustered(Bunny), + + stop_join_start(Rabbit, Bunny), + assert_clustered([Rabbit, Bunny]), + + stop_join_start(Hare, Bunny, true), + assert_cluster_status( + {[Bunny, Hare, Rabbit], [Bunny, Rabbit], [Bunny, Hare, Rabbit]}, + [Rabbit, Hare, Bunny]), + + %% Allow clustering with already clustered node + ok = stop_app(Rabbit), + {ok, already_member} = join_cluster(Rabbit, Hare), + ok = start_app(Rabbit), + + stop_reset_start(Rabbit), + assert_not_clustered(Rabbit), + assert_cluster_status({[Bunny, Hare], [Bunny], [Bunny, Hare]}, + [Hare, Bunny]), + + stop_reset_start(Hare), + assert_not_clustered(Hare), + assert_not_clustered(Bunny). 
+
+join_cluster_bad_operations(Config) ->
+    [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+    %% Non-existent node
+    ok = stop_app(Rabbit),
+    assert_failure(fun () -> join_cluster(Rabbit, non@existant) end),
+    ok = start_app(Rabbit),
+    assert_not_clustered(Rabbit),
+
+    %% Trying to cluster with mnesia running
+    assert_failure(fun () -> join_cluster(Rabbit, Bunny) end),
+    assert_not_clustered(Rabbit),
+
+    %% Trying to cluster the node with itself
+    ok = stop_app(Rabbit),
+    assert_failure(fun () -> join_cluster(Rabbit, Rabbit) end),
+    ok = start_app(Rabbit),
+    assert_not_clustered(Rabbit),
+
+    %% Do not let the node leave the cluster or reset if it's the only
+    %% ram node
+    stop_join_start(Hare, Rabbit, true),
+    assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
+                          [Rabbit, Hare]),
+    ok = stop_app(Hare),
+    assert_failure(fun () -> join_cluster(Rabbit, Bunny) end),
+    assert_failure(fun () -> reset(Rabbit) end),
+    ok = start_app(Hare),
+    assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
+                          [Rabbit, Hare]),
+
+    %% Cannot start RAM-only node first
+    ok = stop_app(Rabbit),
+    ok = stop_app(Hare),
+    assert_failure(fun () -> start_app(Hare) end),
+    ok = start_app(Rabbit),
+    ok = start_app(Hare),
+    ok.
+
+%% This tests that the nodes in the cluster are notified immediately of a node
+%% join, and not just after the app is started.
+join_to_start_interval(Config) ->
+    [Rabbit, Hare, _Bunny] = cluster_members(Config),
+
+    ok = stop_app(Rabbit),
+    ok = join_cluster(Rabbit, Hare),
+    assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+                          [Rabbit, Hare]),
+    ok = start_app(Rabbit),
+    assert_clustered([Rabbit, Hare]).
+
+forget_cluster_node(Config) ->
+    [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+    %% Trying to remove a node not in the cluster should fail
+    assert_failure(fun () -> forget_cluster_node(Hare, Rabbit) end),
+
+    stop_join_start(Rabbit, Hare),
+    assert_clustered([Rabbit, Hare]),
+
+    %% Trying to remove an online node should fail
+    assert_failure(fun () -> forget_cluster_node(Hare, Rabbit) end),
+
+    ok = stop_app(Rabbit),
+    %% We're passing the --offline flag, but Hare is online
+    assert_failure(fun () -> forget_cluster_node(Hare, Rabbit, true) end),
+    %% Removing some non-existent node will fail
+    assert_failure(fun () -> forget_cluster_node(Hare, non@existant) end),
+    ok = forget_cluster_node(Hare, Rabbit),
+    assert_not_clustered(Hare),
+    assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+                          [Rabbit]),
+
+    %% Now we can't start Rabbit since it thinks that it's still in the cluster
+    %% with Hare, while Hare disagrees.
+    assert_failure(fun () -> start_app(Rabbit) end),
+
+    ok = reset(Rabbit),
+    ok = start_app(Rabbit),
+    assert_not_clustered(Rabbit),
+
+    %% Now we remove Rabbit from an offline node.
+ stop_join_start(Bunny, Hare), + stop_join_start(Rabbit, Hare), + assert_clustered([Rabbit, Hare, Bunny]), + ok = stop_app(Hare), + ok = stop_app(Rabbit), + ok = stop_app(Bunny), + %% This is fine but we need the flag + assert_failure(fun () -> forget_cluster_node(Hare, Bunny) end), + %% Also fails because hare node is still running + assert_failure(fun () -> forget_cluster_node(Hare, Bunny, true) end), + %% But this works + ok = rabbit_ct_broker_helpers:stop_node(Config, Hare), + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare, + ["forget_cluster_node", "--offline", Bunny]), + ok = rabbit_ct_broker_helpers:start_node(Config, Hare), + ok = start_app(Rabbit), + %% Bunny still thinks its clustered with Rabbit and Hare + assert_failure(fun () -> start_app(Bunny) end), + ok = reset(Bunny), + ok = start_app(Bunny), + assert_not_clustered(Bunny), + assert_clustered([Rabbit, Hare]). + +forget_removes_things(Config) -> + test_removes_things(Config, fun (R, H) -> ok = forget_cluster_node(H, R) end). + +reset_removes_things(Config) -> + test_removes_things(Config, fun (R, _H) -> ok = reset(R) end). + +test_removes_things(Config, LoseRabbit) -> + Unmirrored = <<"unmirrored-queue">>, + [Rabbit, Hare] = cluster_members(Config), + RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit), + declare(RCh, Unmirrored), + ok = stop_app(Rabbit), + + HCh = rabbit_ct_client_helpers:open_channel(Config, Hare), + {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} = + (catch declare(HCh, Unmirrored)), + + ok = LoseRabbit(Rabbit, Hare), + HCh2 = rabbit_ct_client_helpers:open_channel(Config, Hare), + declare(HCh2, Unmirrored), + ok. 
+ +forget_offline_removes_things(Config) -> + [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config, + nodename), + Unmirrored = <<"unmirrored-queue">>, + X = <<"X">>, + RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit), + declare(RCh, Unmirrored), + + amqp_channel:call(RCh, #'exchange.declare'{durable = true, + exchange = X, + auto_delete = true}), + amqp_channel:call(RCh, #'queue.bind'{queue = Unmirrored, + exchange = X}), + ok = rabbit_ct_broker_helpers:stop_broker(Config, Rabbit), + + HCh = rabbit_ct_client_helpers:open_channel(Config, Hare), + {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} = + (catch declare(HCh, Unmirrored)), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Hare), + ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit), + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare, + ["forget_cluster_node", "--offline", Rabbit]), + ok = rabbit_ct_broker_helpers:start_node(Config, Hare), + + HCh2 = rabbit_ct_client_helpers:open_channel(Config, Hare), + declare(HCh2, Unmirrored), + {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} = + (catch amqp_channel:call(HCh2,#'exchange.declare'{durable = true, + exchange = X, + auto_delete = true, + passive = true})), + ok. + +forget_promotes_offline_slave(Config) -> + [A, B, C, D] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + ACh = rabbit_ct_client_helpers:open_channel(Config, A), + Q = <<"mirrored-queue">>, + declare(ACh, Q), + set_ha_policy(Config, Q, A, [B, C]), + set_ha_policy(Config, Q, A, [C, D]), %% Test add and remove from recoverable_slaves + + %% Publish and confirm + amqp_channel:call(ACh, #'confirm.select'{}), + amqp_channel:cast(ACh, #'basic.publish'{routing_key = Q}, + #amqp_msg{props = #'P_basic'{delivery_mode = 2}}), + amqp_channel:wait_for_confirms(ACh), + + %% We kill nodes rather than stop them in order to make sure + %% that we aren't dependent on anything that happens as they shut + %% down (see bug 26467). 
+ ok = rabbit_ct_broker_helpers:kill_node(Config, D), + ok = rabbit_ct_broker_helpers:kill_node(Config, C), + ok = rabbit_ct_broker_helpers:kill_node(Config, B), + ok = rabbit_ct_broker_helpers:kill_node(Config, A), + + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, C, + ["force_boot"]), + + ok = rabbit_ct_broker_helpers:start_node(Config, C), + + %% We should now have the following dramatis personae: + %% A - down, master + %% B - down, used to be slave, no longer is, never had the message + %% C - running, should be slave, but has wiped the message on restart + %% D - down, recoverable slave, contains message + %% + %% So forgetting A should offline-promote the queue to D, keeping + %% the message. + + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, C, + ["forget_cluster_node", A]), + + ok = rabbit_ct_broker_helpers:start_node(Config, D), + DCh2 = rabbit_ct_client_helpers:open_channel(Config, D), + #'queue.declare_ok'{message_count = 1} = declare(DCh2, Q), + ok. + +set_ha_policy(Config, Q, Master, Slaves) -> + Nodes = [list_to_binary(atom_to_list(N)) || N <- [Master | Slaves]], + rabbit_ct_broker_helpers:set_ha_policy(Config, Master, Q, + {<<"nodes">>, Nodes}), + await_slaves(Q, Master, Slaves). + +await_slaves(Q, Master, Slaves) -> + {ok, #amqqueue{pid = MPid, + slave_pids = SPids}} = + rpc:call(Master, rabbit_amqqueue, lookup, + [rabbit_misc:r(<<"/">>, queue, Q)]), + ActMaster = node(MPid), + ActSlaves = lists:usort([node(P) || P <- SPids]), + case {Master, lists:usort(Slaves)} of + {ActMaster, ActSlaves} -> ok; + _ -> timer:sleep(100), + await_slaves(Q, Master, Slaves) + end. 
+ +force_boot(Config) -> + [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config, + nodename), + {error, _, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit, + ["force_boot"]), + ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit), + ok = rabbit_ct_broker_helpers:stop_node(Config, Hare), + {error, _} = rabbit_ct_broker_helpers:start_node(Config, Rabbit), + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit, + ["force_boot"]), + ok = rabbit_ct_broker_helpers:start_node(Config, Rabbit), + ok. + +change_cluster_node_type(Config) -> + [Rabbit, Hare, _Bunny] = cluster_members(Config), + + %% Trying to change the ram node when not clustered should always fail + ok = stop_app(Rabbit), + assert_failure(fun () -> change_cluster_node_type(Rabbit, ram) end), + assert_failure(fun () -> change_cluster_node_type(Rabbit, disc) end), + ok = start_app(Rabbit), + + ok = stop_app(Rabbit), + join_cluster(Rabbit, Hare), + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]}, + [Rabbit, Hare]), + change_cluster_node_type(Rabbit, ram), + assert_cluster_status({[Rabbit, Hare], [Hare], [Hare]}, + [Rabbit, Hare]), + change_cluster_node_type(Rabbit, disc), + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]}, + [Rabbit, Hare]), + change_cluster_node_type(Rabbit, ram), + ok = start_app(Rabbit), + assert_cluster_status({[Rabbit, Hare], [Hare], [Hare, Rabbit]}, + [Rabbit, Hare]), + + %% Changing to ram when you're the only ram node should fail + ok = stop_app(Hare), + assert_failure(fun () -> change_cluster_node_type(Hare, ram) end), + ok = start_app(Hare). 
+
+change_cluster_when_node_offline(Config) ->
+    [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+    %% Cluster the three nodes
+    stop_join_start(Rabbit, Hare),
+    assert_clustered([Rabbit, Hare]),
+
+    stop_join_start(Bunny, Hare),
+    assert_clustered([Rabbit, Hare, Bunny]),
+
+    %% Bring down Rabbit, and remove Bunny from the cluster while
+    %% Rabbit is offline
+    ok = stop_app(Rabbit),
+    ok = stop_app(Bunny),
+    ok = reset(Bunny),
+    assert_cluster_status({[Bunny], [Bunny], []}, [Bunny]),
+    assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]}, [Hare]),
+    assert_cluster_status(
+      {[Rabbit, Hare, Bunny], [Rabbit, Hare, Bunny], [Hare, Bunny]}, [Rabbit]),
+
+    %% Bring Rabbit back up
+    ok = start_app(Rabbit),
+    assert_clustered([Rabbit, Hare]),
+    ok = start_app(Bunny),
+    assert_not_clustered(Bunny),
+
+    %% Now the same, but Rabbit is a RAM node, and we bring up Bunny
+    %% before
+    ok = stop_app(Rabbit),
+    ok = change_cluster_node_type(Rabbit, ram),
+    ok = start_app(Rabbit),
+    stop_join_start(Bunny, Hare),
+    assert_cluster_status(
+      {[Rabbit, Hare, Bunny], [Hare, Bunny], [Rabbit, Hare, Bunny]},
+      [Rabbit, Hare, Bunny]),
+    ok = stop_app(Rabbit),
+    ok = stop_app(Bunny),
+    ok = reset(Bunny),
+    ok = start_app(Bunny),
+    assert_not_clustered(Bunny),
+    assert_cluster_status({[Rabbit, Hare], [Hare], [Hare]}, [Hare]),
+    assert_cluster_status(
+      {[Rabbit, Hare, Bunny], [Hare, Bunny], [Hare, Bunny]},
+      [Rabbit]),
+    ok = start_app(Rabbit),
+    assert_cluster_status({[Rabbit, Hare], [Hare], [Rabbit, Hare]},
+                          [Rabbit, Hare]),
+    assert_not_clustered(Bunny).
+
+update_cluster_nodes(Config) ->
+    [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+    %% Mnesia is running...
+
+    assert_failure(fun () -> update_cluster_nodes(Rabbit, Hare) end),
+
+    ok = stop_app(Rabbit),
+    ok = join_cluster(Rabbit, Hare),
+    ok = stop_app(Bunny),
+    ok = join_cluster(Bunny, Hare),
+    ok = start_app(Bunny),
+    stop_reset_start(Hare),
+    assert_failure(fun () -> start_app(Rabbit) end),
+    %% Bogus node
+    assert_failure(fun () -> update_cluster_nodes(Rabbit, non@existant) end),
+    %% Inconsistent node
+    assert_failure(fun () -> update_cluster_nodes(Rabbit, Hare) end),
+    ok = update_cluster_nodes(Rabbit, Bunny),
+    ok = start_app(Rabbit),
+    assert_not_clustered(Hare),
+    assert_clustered([Rabbit, Bunny]).
+
+erlang_config(Config) ->
+    [Rabbit, Hare] = cluster_members(Config),
+
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, {[Rabbit], disc}]),
+    ok = start_app(Hare),
+    assert_clustered([Rabbit, Hare]),
+
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, {[Rabbit], ram}]),
+    ok = start_app(Hare),
+    assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
+                          [Rabbit, Hare]),
+
+    %% Check having a stop_app'ed node around doesn't break completely.
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = stop_app(Rabbit),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, {[Rabbit], disc}]),
+    ok = start_app(Hare),
+    ok = start_app(Rabbit),
+    assert_not_clustered(Hare),
+    assert_not_clustered(Rabbit),
+
+    %% We get a warning but we start anyway
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, {[non@existent], disc}]),
+    ok = start_app(Hare),
+    assert_not_clustered(Hare),
+    assert_not_clustered(Rabbit),
+
+    %% If we use a legacy config file, the node fails to start.
+
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, [Rabbit]]),
+    assert_failure(fun () -> start_app(Hare) end),
+    assert_not_clustered(Rabbit),
+
+    %% If we use an invalid node name, the node fails to start.
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, {["Mike's computer"], disc}]),
+    assert_failure(fun () -> start_app(Hare) end),
+    assert_not_clustered(Rabbit),
+
+    %% If we use an invalid node type, the node fails to start.
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, {[Rabbit], blue}]),
+    assert_failure(fun () -> start_app(Hare) end),
+    assert_not_clustered(Rabbit),
+
+    %% If we use an invalid cluster_nodes conf, the node fails to start.
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, true]),
+    assert_failure(fun () -> start_app(Hare) end),
+    assert_not_clustered(Rabbit),
+
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, "Yes, please"]),
+    assert_failure(fun () -> start_app(Hare) end),
+    assert_not_clustered(Rabbit).
+
+force_reset_node(Config) ->
+    [Rabbit, Hare, _Bunny] = cluster_members(Config),
+
+    stop_join_start(Rabbit, Hare),
+    stop_app(Rabbit),
+    force_reset(Rabbit),
+    %% Hare thinks that Rabbit is still clustered
+    assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+                          [Hare]),
+    %% ...but it isn't
+    assert_cluster_status({[Rabbit], [Rabbit], []}, [Rabbit]),
+    %% We can rejoin Rabbit and Hare
+    update_cluster_nodes(Rabbit, Hare),
+    start_app(Rabbit),
+    assert_clustered([Rabbit, Hare]).
+
+status_with_alarm(Config) ->
+    [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+                                                               nodename),
+
+    %% Given: an alarm is raised on each node.
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit, + ["set_vm_memory_high_watermark", "0.000000001"]), + rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare, + ["set_disk_free_limit", "2048G"]), + + %% When: we ask for cluster status. + {ok, S} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit, + ["cluster_status"]), + {ok, R} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare, + ["cluster_status"]), + + %% Then: both nodes have printed alarm information for eachother. + ok = alarm_information_on_each_node(S, Rabbit, Hare), + ok = alarm_information_on_each_node(R, Rabbit, Hare). + + +%% ---------------------------------------------------------------------------- +%% Internal utils + +cluster_members(Config) -> + rabbit_ct_broker_helpers:get_node_configs(Config, nodename). + +assert_cluster_status(Status0, Nodes) -> + Status = {AllNodes, _, _} = sort_cluster_status(Status0), + wait_for_cluster_status(Status, AllNodes, Nodes). + +wait_for_cluster_status(Status, AllNodes, Nodes) -> + Max = 10000 / ?LOOP_RECURSION_DELAY, + wait_for_cluster_status(0, Max, Status, AllNodes, Nodes). + +wait_for_cluster_status(N, Max, Status, _AllNodes, Nodes) when N >= Max -> + erlang:error({cluster_status_max_tries_failed, + [{nodes, Nodes}, + {expected_status, Status}, + {max_tried, Max}]}); +wait_for_cluster_status(N, Max, Status, AllNodes, Nodes) -> + case lists:all(fun (Node) -> + verify_status_equal(Node, Status, AllNodes) + end, Nodes) of + true -> ok; + false -> timer:sleep(?LOOP_RECURSION_DELAY), + wait_for_cluster_status(N + 1, Max, Status, AllNodes, Nodes) + end. + +verify_status_equal(Node, Status, AllNodes) -> + NodeStatus = sort_cluster_status(cluster_status(Node)), + (AllNodes =/= [Node]) =:= rpc:call(Node, rabbit_mnesia, is_clustered, []) + andalso NodeStatus =:= Status. + +cluster_status(Node) -> + {rpc:call(Node, rabbit_mnesia, cluster_nodes, [all]), + rpc:call(Node, rabbit_mnesia, cluster_nodes, [disc]), + rpc:call(Node, rabbit_mnesia, cluster_nodes, [running])}. 
+ +sort_cluster_status({All, Disc, Running}) -> + {lists:sort(All), lists:sort(Disc), lists:sort(Running)}. + +assert_clustered(Nodes) -> + assert_cluster_status({Nodes, Nodes, Nodes}, Nodes). + +assert_not_clustered(Node) -> + assert_cluster_status({[Node], [Node], [Node]}, [Node]). + +assert_failure(Fun) -> + case catch Fun() of + {error, Reason} -> Reason; + {error_string, Reason} -> Reason; + {badrpc, {'EXIT', Reason}} -> Reason; + {badrpc_multi, Reason, _Nodes} -> Reason; + Other -> exit({expected_failure, Other}) + end. + +stop_app(Node) -> + control_action(stop_app, Node). + +start_app(Node) -> + control_action(start_app, Node). + +join_cluster(Node, To) -> + join_cluster(Node, To, false). + +join_cluster(Node, To, Ram) -> + control_action(join_cluster, Node, [atom_to_list(To)], [{"--ram", Ram}]). + +reset(Node) -> + control_action(reset, Node). + +force_reset(Node) -> + control_action(force_reset, Node). + +forget_cluster_node(Node, Removee, RemoveWhenOffline) -> + control_action(forget_cluster_node, Node, [atom_to_list(Removee)], + [{"--offline", RemoveWhenOffline}]). + +forget_cluster_node(Node, Removee) -> + forget_cluster_node(Node, Removee, false). + +change_cluster_node_type(Node, Type) -> + control_action(change_cluster_node_type, Node, [atom_to_list(Type)]). + +update_cluster_nodes(Node, DiscoveryNode) -> + control_action(update_cluster_nodes, Node, [atom_to_list(DiscoveryNode)]). + +stop_join_start(Node, ClusterTo, Ram) -> + ok = stop_app(Node), + ok = join_cluster(Node, ClusterTo, Ram), + ok = start_app(Node). + +stop_join_start(Node, ClusterTo) -> + stop_join_start(Node, ClusterTo, false). + +stop_reset_start(Node) -> + ok = stop_app(Node), + ok = reset(Node), + ok = start_app(Node). + +control_action(Command, Node) -> + control_action(Command, Node, [], []). + +control_action(Command, Node, Args) -> + control_action(Command, Node, Args, []). 
+ +control_action(Command, Node, Args, Opts) -> + rpc:call(Node, rabbit_control_main, action, + [Command, Node, Args, Opts, + fun io:format/2]). + +declare(Ch, Name) -> + Res = amqp_channel:call(Ch, #'queue.declare'{durable = true, + queue = Name}), + amqp_channel:call(Ch, #'queue.bind'{queue = Name, + exchange = <<"amq.fanout">>}), + Res. + +alarm_information_on_each_node(Output, Rabbit, Hare) -> + + A = string:str(Output, "alarms"), true = A > 0, + + %% Test that names are printed after `alarms': this counts on + %% output with a `{Name, Value}' kind of format, for listing + %% alarms, so that we can miss any node names in preamble text. + Alarms = string:substr(Output, A), + RabbitStr = atom_to_list(Rabbit), + HareStr = atom_to_list(Hare), + match = re:run(Alarms, "\\{'?" ++ RabbitStr ++ "'?,\\[memory\\]\\}", + [{capture, none}]), + match = re:run(Alarms, "\\{'?" ++ HareStr ++ "'?,\\[disk\\]\\}", + [{capture, none}]), + + ok. diff --git a/test/crashing_queues_SUITE.erl b/test/crashing_queues_SUITE.erl new file mode 100644 index 000000000000..872b771811f3 --- /dev/null +++ b/test/crashing_queues_SUITE.erl @@ -0,0 +1,269 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(crashing_queues_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile(export_all). 
+ +all() -> + [ + {group, cluster_size_2} + ]. + +groups() -> + [ + {cluster_size_2, [], [ + crashing_unmirrored, + crashing_mirrored, + give_up_after_repeated_crashes + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(cluster_size_2, Config) -> + rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_count, 2} + ]). + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + ClusterSize = ?config(rmq_nodes_count, Config), + TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, Testcase}, + {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. 
+%% ------------------------------------------------------------------- + +crashing_unmirrored(Config) -> + [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + ChA = rabbit_ct_client_helpers:open_channel(Config, A), + ConnB = rabbit_ct_client_helpers:open_connection(Config, B), + QName = <<"crashing_unmirrored-q">>, + amqp_channel:call(ChA, #'confirm.select'{}), + test_queue_failure(A, ChA, ConnB, 1, 0, + #'queue.declare'{queue = QName, durable = true}), + test_queue_failure(A, ChA, ConnB, 0, 0, + #'queue.declare'{queue = QName, durable = false}), + ok. + +crashing_mirrored(Config) -> + [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<".*">>, <<"all">>), + ChA = rabbit_ct_client_helpers:open_channel(Config, A), + ConnB = rabbit_ct_client_helpers:open_connection(Config, B), + QName = <<"crashing_mirrored-q">>, + amqp_channel:call(ChA, #'confirm.select'{}), + test_queue_failure(A, ChA, ConnB, 2, 1, + #'queue.declare'{queue = QName, durable = true}), + ok. + +test_queue_failure(Node, Ch, RaceConn, MsgCount, SlaveCount, Decl) -> + #'queue.declare_ok'{queue = QName} = amqp_channel:call(Ch, Decl), + try + publish(Ch, QName, transient), + publish(Ch, QName, durable), + Racer = spawn_declare_racer(RaceConn, Decl), + kill_queue(Node, QName), + assert_message_count(MsgCount, Ch, QName), + assert_slave_count(SlaveCount, Node, QName), + stop_declare_racer(Racer) + after + amqp_channel:call(Ch, #'queue.delete'{queue = QName}) + end. 
+ +give_up_after_repeated_crashes(Config) -> + [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + ChA = rabbit_ct_client_helpers:open_channel(Config, A), + ChB = rabbit_ct_client_helpers:open_channel(Config, B), + QName = <<"give_up_after_repeated_crashes-q">>, + amqp_channel:call(ChA, #'confirm.select'{}), + amqp_channel:call(ChA, #'queue.declare'{queue = QName, + durable = true}), + await_state(A, QName, running), + publish(ChA, QName, durable), + kill_queue_hard(A, QName), + {'EXIT', _} = (catch amqp_channel:call( + ChA, #'queue.declare'{queue = QName, + durable = true})), + await_state(A, QName, crashed), + amqp_channel:call(ChB, #'queue.delete'{queue = QName}), + amqp_channel:call(ChB, #'queue.declare'{queue = QName, + durable = true}), + await_state(A, QName, running), + + %% Since it's convenient, also test absent queue status here. + rabbit_ct_broker_helpers:stop_node(Config, B), + await_state(A, QName, down), + ok. + + +publish(Ch, QName, DelMode) -> + Publish = #'basic.publish'{exchange = <<>>, routing_key = QName}, + Msg = #amqp_msg{props = #'P_basic'{delivery_mode = del_mode(DelMode)}}, + amqp_channel:cast(Ch, Publish, Msg), + amqp_channel:wait_for_confirms(Ch). + +del_mode(transient) -> 1; +del_mode(durable) -> 2. + +spawn_declare_racer(Conn, Decl) -> + Self = self(), + spawn_link(fun() -> declare_racer_loop(Self, Conn, Decl) end). + +stop_declare_racer(Pid) -> + Pid ! stop, + MRef = erlang:monitor(process, Pid), + receive + {'DOWN', MRef, process, Pid, _} -> ok + end. + +declare_racer_loop(Parent, Conn, Decl) -> + receive + stop -> unlink(Parent) + after 0 -> + %% Catch here because we might happen to catch the queue + %% while it is in the middle of recovering and thus + %% explode with NOT_FOUND because crashed. Doesn't matter, + %% we are only in this loop to try to fool the recovery + %% code anyway. 
+ try + case amqp_connection:open_channel(Conn) of + {ok, Ch} -> amqp_channel:call(Ch, Decl); + closing -> ok + end + catch + exit:_ -> + ok + end, + declare_racer_loop(Parent, Conn, Decl) + end. + +await_state(Node, QName, State) -> + await_state(Node, QName, State, 30000). + +await_state(Node, QName, State, Time) -> + case state(Node, QName) of + State -> + ok; + Other -> + case Time of + 0 -> exit({timeout_awaiting_state, State, Other}); + _ -> timer:sleep(100), + await_state(Node, QName, State, Time - 100) + end + end. + +state(Node, QName) -> + V = <<"/">>, + Res = rabbit_misc:r(V, queue, QName), + Infos = rpc:call(Node, rabbit_amqqueue, info_all, [V, [name, state]]), + case Infos of + [] -> undefined; + [[{name, Res}, {state, State}]] -> State + end. + +kill_queue_hard(Node, QName) -> + case kill_queue(Node, QName) of + crashed -> ok; + _NewPid -> timer:sleep(100), + kill_queue_hard(Node, QName) + end. + +kill_queue(Node, QName) -> + Pid1 = queue_pid(Node, QName), + exit(Pid1, boom), + await_new_pid(Node, QName, Pid1). + +queue_pid(Node, QName) -> + #amqqueue{pid = QPid, + state = State} = lookup(Node, QName), + case State of + crashed -> case sup_child(Node, rabbit_amqqueue_sup_sup) of + {ok, _} -> QPid; %% restarting + {error, no_child} -> crashed %% given up + end; + _ -> QPid + end. + +sup_child(Node, Sup) -> + case rpc:call(Node, supervisor2, which_children, [Sup]) of + [{_, Child, _, _}] -> {ok, Child}; + [] -> {error, no_child}; + {badrpc, {'EXIT', {noproc, _}}} -> {error, no_sup} + end. + +lookup(Node, QName) -> + {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup, + [rabbit_misc:r(<<"/">>, queue, QName)]), + Q. + +await_new_pid(Node, QName, OldPid) -> + case queue_pid(Node, QName) of + OldPid -> timer:sleep(10), + await_new_pid(Node, QName, OldPid); + New -> New + end. + +assert_message_count(Count, Ch, QName) -> + #'queue.declare_ok'{message_count = Count} = + amqp_channel:call(Ch, #'queue.declare'{queue = QName, + passive = true}). 
+ +assert_slave_count(Count, Node, QName) -> + Q = lookup(Node, QName), + [{_, Pids}] = rpc:call(Node, rabbit_amqqueue, info, [Q, [slave_pids]]), + RealCount = case Pids of + '' -> 0; + _ -> length(Pids) + end, + case RealCount of + Count -> + ok; + _ when RealCount < Count -> + timer:sleep(10), + assert_slave_count(Count, Node, QName); + _ -> + exit({too_many_slaves, Count, RealCount}) + end. diff --git a/test/dynamic_ha_SUITE.erl b/test/dynamic_ha_SUITE.erl new file mode 100644 index 000000000000..5872d97d4cbd --- /dev/null +++ b/test/dynamic_ha_SUITE.erl @@ -0,0 +1,329 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(dynamic_ha_SUITE). + +%% rabbit_tests:test_dynamic_mirroring() is a unit test which should +%% test the logic of what all the policies decide to do, so we don't +%% need to exhaustively test that here. 
What we need to test is that: +%% +%% * Going from non-mirrored to mirrored works and vice versa +%% * Changing policy can add / remove mirrors and change the master +%% * Adding a node will create a new mirror when there are not enough nodes +%% for the policy +%% * Removing a node will not create a new mirror even if the policy +%% logic wants it (since this gives us a good way to lose messages +%% on cluster shutdown, by repeated failover to new nodes) +%% +%% The first two are change_policy, the last two are change_cluster + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile(export_all). + +-define(QNAME, <<"ha.test">>). +-define(POLICY, <<"^ha.test$">>). %% " emacs +-define(VHOST, <<"/">>). + +all() -> + [ + {group, unclustered}, + {group, clustered} + ]. + +groups() -> + [ + {unclustered, [], [ + {cluster_size_5, [], [ + change_cluster + ]} + ]}, + {clustered, [], [ + {cluster_size_2, [], [ + vhost_deletion, + promote_on_shutdown + ]}, + {cluster_size_3, [], [ + change_policy, + rapid_change + ]} + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). 
+ +init_per_group(unclustered, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]); +init_per_group(clustered, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]); +init_per_group(cluster_size_2, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]); +init_per_group(cluster_size_3, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]); +init_per_group(cluster_size_5, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 5}]). + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + ClusterSize = ?config(rmq_nodes_count, Config), + TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, Testcase}, + {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. +%% ------------------------------------------------------------------- + +change_policy(Config) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + ACh = rabbit_ct_client_helpers:open_channel(Config, A), + + %% When we first declare a queue with no policy, it's not HA. 
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME}), + assert_slaves(A, ?QNAME, {A, ''}), + + %% Give it policy "all", it becomes HA and gets all mirrors + rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, <<"all">>), + assert_slaves(A, ?QNAME, {A, [B, C]}), + + %% Give it policy "nodes", it gets specific mirrors + rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, + {<<"nodes">>, [rabbit_misc:atom_to_binary(A), + rabbit_misc:atom_to_binary(B)]}), + assert_slaves(A, ?QNAME, {A, [B]}), + + %% Now explicitly change the mirrors + rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, + {<<"nodes">>, [rabbit_misc:atom_to_binary(A), + rabbit_misc:atom_to_binary(C)]}), + assert_slaves(A, ?QNAME, {A, [C]}, [{A, [B, C]}]), + + %% Clear the policy, and we go back to non-mirrored + rabbit_ct_broker_helpers:clear_policy(Config, A, ?POLICY), + assert_slaves(A, ?QNAME, {A, ''}), + + %% Test switching "away" from an unmirrored node + rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, + {<<"nodes">>, [rabbit_misc:atom_to_binary(B), + rabbit_misc:atom_to_binary(C)]}), + assert_slaves(A, ?QNAME, {A, [B, C]}, [{A, [B]}, {A, [C]}]), + + ok. + +change_cluster(Config) -> + [A, B, C, D, E] = rabbit_ct_broker_helpers:get_node_configs(Config, + nodename), + rabbit_ct_broker_helpers:cluster_nodes(Config, [A, B, C]), + ACh = rabbit_ct_client_helpers:open_channel(Config, A), + + amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME}), + assert_slaves(A, ?QNAME, {A, ''}), + + %% Give it policy exactly 4, it should mirror to all 3 nodes + rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, + {<<"exactly">>, 4}), + assert_slaves(A, ?QNAME, {A, [B, C]}), + + %% Add D and E, D joins in + rabbit_ct_broker_helpers:cluster_nodes(Config, [A, D, E]), + assert_slaves(A, ?QNAME, {A, [B, C, D]}), + + %% Remove D, E joins in + rabbit_ct_broker_helpers:stop_node(Config, D), + assert_slaves(A, ?QNAME, {A, [B, C, E]}), + + ok. 
+ +rapid_change(Config) -> + A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + ACh = rabbit_ct_client_helpers:open_channel(Config, A), + {_Pid, MRef} = spawn_monitor( + fun() -> + [rapid_amqp_ops(ACh, I) || I <- lists:seq(1, 100)] + end), + rapid_loop(Config, A, MRef), + ok. + +rapid_amqp_ops(Ch, I) -> + Payload = list_to_binary(integer_to_list(I)), + amqp_channel:call(Ch, #'queue.declare'{queue = ?QNAME}), + amqp_channel:cast(Ch, #'basic.publish'{exchange = <<"">>, + routing_key = ?QNAME}, + #amqp_msg{payload = Payload}), + amqp_channel:subscribe(Ch, #'basic.consume'{queue = ?QNAME, + no_ack = true}, self()), + receive #'basic.consume_ok'{} -> ok + end, + receive {#'basic.deliver'{}, #amqp_msg{payload = Payload}} -> + ok + end, + amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}). + +rapid_loop(Config, Node, MRef) -> + receive + {'DOWN', MRef, process, _Pid, normal} -> + ok; + {'DOWN', MRef, process, _Pid, Reason} -> + exit({amqp_ops_died, Reason}) + after 0 -> + rabbit_ct_broker_helpers:set_ha_policy(Config, Node, ?POLICY, + <<"all">>), + rabbit_ct_broker_helpers:clear_policy(Config, Node, ?POLICY), + rapid_loop(Config, Node, MRef) + end. + +%% Vhost deletion needs to successfully tear down policies and queues +%% with policies. At least smoke-test that it doesn't blow up. +vhost_deletion(Config) -> + A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + rabbit_ct_broker_helpers:set_ha_policy_all(Config), + ACh = rabbit_ct_client_helpers:open_channel(Config, A), + amqp_channel:call(ACh, #'queue.declare'{queue = <<"vhost_deletion-q">>}), + ok = rpc:call(A, rabbit_vhost, delete, [<<"/">>]), + ok. 
+ +promote_on_shutdown(Config) -> + [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.promote">>, + <<"all">>, [{<<"ha-promote-on-shutdown">>, <<"always">>}]), + rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.nopromote">>, + <<"all">>), + + ACh = rabbit_ct_client_helpers:open_channel(Config, A), + [begin + amqp_channel:call(ACh, #'queue.declare'{queue = Q, + durable = true}), + rabbit_ct_client_helpers:publish(ACh, Q, 10) + end || Q <- [<<"ha.promote.test">>, <<"ha.nopromote.test">>]], + ok = rabbit_ct_broker_helpers:restart_node(Config, B), + ok = rabbit_ct_broker_helpers:stop_node(Config, A), + BCh = rabbit_ct_client_helpers:open_channel(Config, B), + #'queue.declare_ok'{message_count = 0} = + amqp_channel:call( + BCh, #'queue.declare'{queue = <<"ha.promote.test">>, + durable = true}), + ?assertExit( + {{shutdown, {server_initiated_close, 404, _}}, _}, + amqp_channel:call( + BCh, #'queue.declare'{queue = <<"ha.nopromote.test">>, + durable = true})), + ok = rabbit_ct_broker_helpers:start_node(Config, A), + ACh2 = rabbit_ct_client_helpers:open_channel(Config, A), + #'queue.declare_ok'{message_count = 10} = + amqp_channel:call( + ACh2, #'queue.declare'{queue = <<"ha.nopromote.test">>, + durable = true}), + ok. + +%%---------------------------------------------------------------------------- + +assert_slaves(RPCNode, QName, Exp) -> + assert_slaves(RPCNode, QName, Exp, []). + +assert_slaves(RPCNode, QName, Exp, PermittedIntermediate) -> + assert_slaves0(RPCNode, QName, Exp, + [{get(previous_exp_m_node), get(previous_exp_s_nodes)} | + PermittedIntermediate]). 
+ +assert_slaves0(RPCNode, QName, {ExpMNode, ExpSNodes}, PermittedIntermediate) -> + Q = find_queue(QName, RPCNode), + Pid = proplists:get_value(pid, Q), + SPids = proplists:get_value(slave_pids, Q), + ActMNode = node(Pid), + ActSNodes = case SPids of + '' -> ''; + _ -> [node(SPid) || SPid <- SPids] + end, + case ExpMNode =:= ActMNode andalso equal_list(ExpSNodes, ActSNodes) of + false -> + %% It's an async change, so if nothing has changed let's + %% just wait - of course this means if something does not + %% change when expected then we time out the test which is + %% a bit tedious + case [found || {PermMNode, PermSNodes} <- PermittedIntermediate, + PermMNode =:= ActMNode, + equal_list(PermSNodes, ActSNodes)] of + [] -> ct:fail("Expected ~p / ~p, got ~p / ~p~nat ~p~n", + [ExpMNode, ExpSNodes, ActMNode, ActSNodes, + get_stacktrace()]); + _ -> timer:sleep(100), + assert_slaves0(RPCNode, QName, {ExpMNode, ExpSNodes}, + PermittedIntermediate) + end; + true -> + put(previous_exp_m_node, ExpMNode), + put(previous_exp_s_nodes, ExpSNodes), + ok + end. + +equal_list('', '') -> true; +equal_list('', _Act) -> false; +equal_list(_Exp, '') -> false; +equal_list([], []) -> true; +equal_list(_Exp, []) -> false; +equal_list([], _Act) -> false; +equal_list([H|T], Act) -> case lists:member(H, Act) of + true -> equal_list(T, Act -- [H]); + false -> false + end. + +find_queue(QName, RPCNode) -> + Qs = rpc:call(RPCNode, rabbit_amqqueue, info_all, [?VHOST], infinity), + case find_queue0(QName, Qs) of + did_not_find_queue -> timer:sleep(100), + find_queue(QName, RPCNode); + Q -> Q + end. + +find_queue0(QName, Qs) -> + case [Q || Q <- Qs, proplists:get_value(name, Q) =:= + rabbit_misc:r(?VHOST, queue, QName)] of + [R] -> R; + [] -> did_not_find_queue + end. + +get_stacktrace() -> + try + throw(e) + catch + _:e -> + erlang:get_stacktrace() + end. 
diff --git a/test/eager_sync_SUITE.erl b/test/eager_sync_SUITE.erl new file mode 100644 index 000000000000..93b308b6c545 --- /dev/null +++ b/test/eager_sync_SUITE.erl @@ -0,0 +1,278 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(eager_sync_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile(export_all). + +-define(QNAME, <<"ha.two.test">>). +-define(QNAME_AUTO, <<"ha.auto.test">>). +-define(MESSAGE_COUNT, 2000). + +all() -> + [ + {group, non_parallel_tests} + ]. + +groups() -> + [ + {non_parallel_tests, [], [ + eager_sync, + eager_sync_cancel, + eager_sync_auto, + eager_sync_auto_on_policy_change, + eager_sync_requeue + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. 
+ +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + ClusterSize = 3, + TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_count, ClusterSize}, + {rmq_nodes_clustered, true}, + {rmq_nodename_suffix, Testcase}, + {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps() ++ [ + fun rabbit_ct_broker_helpers:set_ha_policy_two_pos/1, + fun rabbit_ct_broker_helpers:set_ha_policy_two_pos_batch_sync/1 + ]). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. +%% ------------------------------------------------------------------- + +eager_sync(Config) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + %% Queue is on AB but not C. 
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A), + Ch = rabbit_ct_client_helpers:open_channel(Config, C), + amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME, + durable = true}), + + %% Don't sync, lose messages + rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), + restart(Config, A), + restart(Config, B), + rabbit_ct_client_helpers:consume(Ch, ?QNAME, 0), + + %% Sync, keep messages + rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), + restart(Config, A), + ok = sync(C, ?QNAME), + restart(Config, B), + rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT), + + %% Check the no-need-to-sync path + rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), + ok = sync(C, ?QNAME), + rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT), + + %% keep unacknowledged messages + rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), + rabbit_ct_client_helpers:fetch(Ch, ?QNAME, 2), + restart(Config, A), + rabbit_ct_client_helpers:fetch(Ch, ?QNAME, 3), + sync(C, ?QNAME), + restart(Config, B), + rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT), + + ok. + +eager_sync_cancel(Config) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + %% Queue is on AB but not C. + ACh = rabbit_ct_client_helpers:open_channel(Config, A), + Ch = rabbit_ct_client_helpers:open_channel(Config, C), + + set_app_sync_batch_size(A), + set_app_sync_batch_size(B), + set_app_sync_batch_size(C), + + amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME, + durable = true}), + {ok, not_syncing} = sync_cancel(C, ?QNAME), %% Idempotence + eager_sync_cancel_test2(Config, A, B, C, Ch). 
+ +eager_sync_cancel_test2(Config, A, B, C, Ch) -> + %% Sync then cancel + rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), + restart(Config, A), + set_app_sync_batch_size(A), + spawn_link(fun() -> ok = sync_nowait(C, ?QNAME) end), + case wait_for_syncing(C, ?QNAME, 1) of + ok -> + case sync_cancel(C, ?QNAME) of + ok -> + wait_for_running(C, ?QNAME), + restart(Config, B), + set_app_sync_batch_size(B), + rabbit_ct_client_helpers:consume(Ch, ?QNAME, 0), + + {ok, not_syncing} = sync_cancel(C, ?QNAME), %% Idempotence + ok; + {ok, not_syncing} -> + %% Damn. Syncing finished between wait_for_syncing/3 and + %% sync_cancel/2 above. Start again. + amqp_channel:call(Ch, #'queue.purge'{queue = ?QNAME}), + eager_sync_cancel_test2(Config, A, B, C, Ch) + end; + synced_already -> + %% Damn. Syncing finished before wait_for_syncing/3. Start again. + amqp_channel:call(Ch, #'queue.purge'{queue = ?QNAME}), + eager_sync_cancel_test2(Config, A, B, C, Ch) + end. + +eager_sync_auto(Config) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + ACh = rabbit_ct_client_helpers:open_channel(Config, A), + Ch = rabbit_ct_client_helpers:open_channel(Config, C), + amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME_AUTO, + durable = true}), + + %% Sync automatically, don't lose messages + rabbit_ct_client_helpers:publish(Ch, ?QNAME_AUTO, ?MESSAGE_COUNT), + restart(Config, A), + wait_for_sync(C, ?QNAME_AUTO), + restart(Config, B), + wait_for_sync(C, ?QNAME_AUTO), + rabbit_ct_client_helpers:consume(Ch, ?QNAME_AUTO, ?MESSAGE_COUNT), + + ok. + +eager_sync_auto_on_policy_change(Config) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + %% Queue is on AB but not C. + ACh = rabbit_ct_client_helpers:open_channel(Config, A), + Ch = rabbit_ct_client_helpers:open_channel(Config, C), + amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME, + durable = true}), + + %% Sync automatically once the policy is changed to tell us to. 
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), + restart(Config, A), + Params = [rabbit_misc:atom_to_binary(N) || N <- [A, B]], + rabbit_ct_broker_helpers:set_ha_policy(Config, + A, <<"^ha.two.">>, {<<"nodes">>, Params}, + [{<<"ha-sync-mode">>, <<"automatic">>}]), + wait_for_sync(C, ?QNAME), + + ok. + +eager_sync_requeue(Config) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + %% Queue is on AB but not C. + ACh = rabbit_ct_client_helpers:open_channel(Config, A), + Ch = rabbit_ct_client_helpers:open_channel(Config, C), + amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME, + durable = true}), + + rabbit_ct_client_helpers:publish(Ch, ?QNAME, 2), + {#'basic.get_ok'{delivery_tag = TagA}, _} = + amqp_channel:call(Ch, #'basic.get'{queue = ?QNAME}), + {#'basic.get_ok'{delivery_tag = TagB}, _} = + amqp_channel:call(Ch, #'basic.get'{queue = ?QNAME}), + amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = TagA, requeue = true}), + restart(Config, B), + ok = sync(C, ?QNAME), + amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = TagB, requeue = true}), + rabbit_ct_client_helpers:consume(Ch, ?QNAME, 2), + + ok. + +restart(Config, Node) -> + rabbit_ct_broker_helpers:restart_broker(Config, Node). + +sync(Node, QName) -> + case sync_nowait(Node, QName) of + ok -> wait_for_sync(Node, QName), + ok; + R -> R + end. + +sync_nowait(Node, QName) -> action(Node, sync_queue, QName). +sync_cancel(Node, QName) -> action(Node, cancel_sync_queue, QName). + +wait_for_sync(Node, QName) -> + sync_detection_SUITE:wait_for_sync_status(true, Node, QName). + +action(Node, Action, QName) -> + rabbit_ct_broker_helpers:control_action( + Action, Node, [binary_to_list(QName)], [{"-p", "/"}]). + +queue(Node, QName) -> + QNameRes = rabbit_misc:r(<<"/">>, queue, QName), + {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup, [QNameRes]), + Q. 
+ +wait_for_syncing(Node, QName, Target) -> + case state(Node, QName) of + {{syncing, _}, _} -> ok; + {running, Target} -> synced_already; + _ -> timer:sleep(100), + wait_for_syncing(Node, QName, Target) + end. + +wait_for_running(Node, QName) -> + case state(Node, QName) of + {running, _} -> ok; + _ -> timer:sleep(100), + wait_for_running(Node, QName) + end. + +state(Node, QName) -> + [{state, State}, {synchronised_slave_pids, Pids}] = + rpc:call(Node, rabbit_amqqueue, info, + [queue(Node, QName), [state, synchronised_slave_pids]]), + {State, length(Pids)}. + +%% eager_sync_cancel_test needs a batch size that's < ?MESSAGE_COUNT +%% in order to pass, because a SyncBatchSize >= ?MESSAGE_COUNT will +%% always finish before the test is able to cancel the sync. +set_app_sync_batch_size(Node) -> + rabbit_ct_broker_helpers:control_action( + eval, Node, + ["application:set_env(rabbit, mirroring_sync_batch_size, 1)."]). diff --git a/test/inet_proxy_dist.erl b/test/inet_proxy_dist.erl new file mode 100644 index 000000000000..32b7641a7988 --- /dev/null +++ b/test/inet_proxy_dist.erl @@ -0,0 +1,201 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. +%% +-module(inet_proxy_dist). + +%% A distribution plugin that uses the usual inet_tcp_dist but allows +%% insertion of a proxy at the receiving end. 
+ +%% inet_*_dist "behaviour" +-export([listen/1, accept/1, accept_connection/5, + setup/5, close/1, select/1, is_node_name/1]). + +%% For copypasta from inet_tcp_dist +-export([do_setup/6]). +-import(error_logger,[error_msg/2]). + +-define(REAL, inet_tcp_dist). + +%%---------------------------------------------------------------------------- + +listen(Name) -> ?REAL:listen(Name). +select(Node) -> ?REAL:select(Node). +accept(Listen) -> ?REAL:accept(Listen). +close(Socket) -> ?REAL:close(Socket). +is_node_name(Node) -> ?REAL:is_node_name(Node). + +accept_connection(AcceptPid, Socket, MyNode, Allowed, SetupTime) -> + ?REAL:accept_connection(AcceptPid, Socket, MyNode, Allowed, SetupTime). + +%% This is copied from inet_tcp_dist, in order to change the +%% output of erl_epmd:port_please/2. + +-include_lib("kernel/include/net_address.hrl"). +-include_lib("kernel/include/dist_util.hrl"). + +setup(Node, Type, MyNode, LongOrShortNames,SetupTime) -> + spawn_opt(?MODULE, do_setup, + [self(), Node, Type, MyNode, LongOrShortNames, SetupTime], + [link, {priority, max}]). 
+ +do_setup(Kernel, Node, Type, MyNode, LongOrShortNames,SetupTime) -> + ?trace("~p~n",[{inet_tcp_dist,self(),setup,Node}]), + [Name, Address] = splitnode(Node, LongOrShortNames), + case inet:getaddr(Address, inet) of + {ok, Ip} -> + Timer = dist_util:start_timer(SetupTime), + case erl_epmd:port_please(Name, Ip) of + {port, TcpPort, Version} -> + ?trace("port_please(~p) -> version ~p~n", + [Node,Version]), + dist_util:reset_timer(Timer), + %% Modification START + Ret = application:get_env(kernel, + dist_and_proxy_ports_map), + PortsMap = case Ret of + {ok, M} -> M; + undefined -> [] + end, + ProxyPort = case inet_tcp_proxy:is_enabled() of + true -> proplists:get_value(TcpPort, PortsMap, TcpPort); + false -> TcpPort + end, + case inet_tcp:connect(Ip, ProxyPort, + [{active, false}, + {packet,2}]) of + {ok, Socket} -> + {ok, {_, SrcPort}} = inet:sockname(Socket), + ok = inet_tcp_proxy_manager:register( + node(), Node, SrcPort, TcpPort, ProxyPort), + %% Modification END + HSData = #hs_data{ + kernel_pid = Kernel, + other_node = Node, + this_node = MyNode, + socket = Socket, + timer = Timer, + this_flags = 0, + other_version = Version, + f_send = fun inet_tcp:send/2, + f_recv = fun inet_tcp:recv/3, + f_setopts_pre_nodeup = + fun(S) -> + inet:setopts + (S, + [{active, false}, + {packet, 4}, + nodelay()]) + end, + f_setopts_post_nodeup = + fun(S) -> + inet:setopts + (S, + [{active, true}, + {deliver, port}, + {packet, 4}, + nodelay()]) + end, + f_getll = fun inet:getll/1, + f_address = + fun(_,_) -> + #net_address{ + address = {Ip,TcpPort}, + host = Address, + protocol = tcp, + family = inet} + end, + mf_tick = fun tick/1, + mf_getstat = fun inet_tcp_dist:getstat/1, + request_type = Type + }, + dist_util:handshake_we_started(HSData); + R -> + io:format("~p failed! ~p~n", [node(), R]), + %% Other Node may have closed since + %% port_please ! 
+ ?trace("other node (~p) " + "closed since port_please.~n", + [Node]), + ?shutdown(Node) + end; + _ -> + ?trace("port_please (~p) " + "failed.~n", [Node]), + ?shutdown(Node) + end; + _Other -> + ?trace("inet_getaddr(~p) " + "failed (~p).~n", [Node,_Other]), + ?shutdown(Node) + end. + +%% If Node is illegal terminate the connection setup!! +splitnode(Node, LongOrShortNames) -> + case split_node(atom_to_list(Node), $@, []) of + [Name|Tail] when Tail =/= [] -> + Host = lists:append(Tail), + case split_node(Host, $., []) of + [_] when LongOrShortNames =:= longnames -> + error_msg("** System running to use " + "fully qualified " + "hostnames **~n" + "** Hostname ~s is illegal **~n", + [Host]), + ?shutdown(Node); + L when length(L) > 1, LongOrShortNames =:= shortnames -> + error_msg("** System NOT running to use fully qualified " + "hostnames **~n" + "** Hostname ~s is illegal **~n", + [Host]), + ?shutdown(Node); + _ -> + [Name, Host] + end; + [_] -> + error_msg("** Nodename ~p illegal, no '@' character **~n", + [Node]), + ?shutdown(Node); + _ -> + error_msg("** Nodename ~p illegal **~n", [Node]), + ?shutdown(Node) + end. + +split_node([Chr|T], Chr, Ack) -> [lists:reverse(Ack)|split_node(T, Chr, [])]; +split_node([H|T], Chr, Ack) -> split_node(T, Chr, [H|Ack]); +split_node([], _, Ack) -> [lists:reverse(Ack)]. + +%% we may not always want the nodelay behaviour +%% for performance reasons + +nodelay() -> + case application:get_env(kernel, dist_nodelay) of + undefined -> + {nodelay, true}; + {ok, true} -> + {nodelay, true}; + {ok, false} -> + {nodelay, false}; + _ -> + {nodelay, true} + end. + +tick(Socket) -> + case inet_tcp:send(Socket, [], [force]) of + {error, closed} -> + self() ! {tcp_closed, Socket}, + {error, closed}; + R -> + R + end. 
diff --git a/test/inet_tcp_proxy.erl b/test/inet_tcp_proxy.erl new file mode 100644 index 000000000000..4498b8f9520d --- /dev/null +++ b/test/inet_tcp_proxy.erl @@ -0,0 +1,134 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. +%% +-module(inet_tcp_proxy). + +%% A TCP proxy for insertion into the Erlang distribution mechanism, +%% which allows us to simulate network partitions. + +-export([start/3, reconnect/1, is_enabled/0, allow/1, block/1]). + +-define(TABLE, ?MODULE). + +%% This can't start_link because there's no supervision hierarchy we +%% can easily fit it into (we need to survive all application +%% restarts). So we have to do some horrible error handling. + +start(ManagerNode, DistPort, ProxyPort) -> + application:set_env(kernel, inet_tcp_proxy_manager_node, ManagerNode), + Parent = self(), + Pid = spawn(error_handler(fun() -> go(Parent, DistPort, ProxyPort) end)), + MRef = erlang:monitor(process, Pid), + receive + ready -> + erlang:demonitor(MRef), + ok; + {'DOWN', MRef, _, _, Reason} -> + {error, Reason} + end. + +reconnect(Nodes) -> + [erlang:disconnect_node(N) || N <- Nodes, N =/= node()], + ok. + +is_enabled() -> + lists:member(?TABLE, ets:all()). + +allow(Node) -> + rabbit_log:info("(~s) Allowing distribution between ~s and ~s~n", + [?MODULE, node(), Node]), + ets:delete(?TABLE, Node). 
+block(Node) -> + rabbit_log:info("(~s) BLOCKING distribution between ~s and ~s~n", + [?MODULE, node(), Node]), + ets:insert(?TABLE, {Node, block}). + +%%---------------------------------------------------------------------------- + +error_handler(Thunk) -> + fun () -> + try + Thunk() + catch _:{{nodedown, _}, _} -> + %% The only other node we ever talk to is the test + %% runner; if that's down then the test is nearly + %% over; die quietly. + ok; + _:X -> + io:format(user, "TCP proxy died with ~p~n At ~p~n", + [X, erlang:get_stacktrace()]), + erlang:halt(1) + end + end. + +go(Parent, Port, ProxyPort) -> + ets:new(?TABLE, [public, named_table]), + {ok, Sock} = gen_tcp:listen(ProxyPort, [inet, + {reuseaddr, true}]), + Parent ! ready, + accept_loop(Sock, Port). + +accept_loop(ListenSock, Port) -> + {ok, Sock} = gen_tcp:accept(ListenSock), + Proxy = spawn(error_handler(fun() -> run_it(Sock, Port) end)), + ok = gen_tcp:controlling_process(Sock, Proxy), + accept_loop(ListenSock, Port). + +run_it(SockIn, Port) -> + case {inet:peername(SockIn), inet:sockname(SockIn)} of + {{ok, {_Addr, SrcPort}}, {ok, {Addr, _OtherPort}}} -> + {ok, Remote, This} = inet_tcp_proxy_manager:lookup(SrcPort), + case node() of + This -> ok; + _ -> exit({not_me, node(), This}) + end, + {ok, SockOut} = gen_tcp:connect(Addr, Port, [inet]), + run_loop({SockIn, SockOut}, Remote, []); + _ -> + ok + end. 
+ +run_loop(Sockets, RemoteNode, Buf0) -> + Block = [{RemoteNode, block}] =:= ets:lookup(?TABLE, RemoteNode), + receive + {tcp, Sock, Data} -> + Buf = [Data | Buf0], + case {Block, get(dist_was_blocked)} of + {true, false} -> + put(dist_was_blocked, Block), + rabbit_log:warning( + "(~s) Distribution BLOCKED between ~s and ~s~n", + [?MODULE, node(), RemoteNode]); + {false, S} when S =:= true orelse S =:= undefined -> + put(dist_was_blocked, Block), + rabbit_log:warning( + "(~s) Distribution allowed between ~s and ~s~n", + [?MODULE, node(), RemoteNode]); + _ -> + ok + end, + case Block of + false -> gen_tcp:send(other(Sock, Sockets), lists:reverse(Buf)), + run_loop(Sockets, RemoteNode, []); + true -> run_loop(Sockets, RemoteNode, Buf) + end; + {tcp_closed, Sock} -> + gen_tcp:close(other(Sock, Sockets)); + X -> + exit({weirdness, X}) + end. + +other(A, {A, B}) -> B; +other(B, {A, B}) -> A. diff --git a/test/inet_tcp_proxy_manager.erl b/test/inet_tcp_proxy_manager.erl new file mode 100644 index 000000000000..18255b8d4849 --- /dev/null +++ b/test/inet_tcp_proxy_manager.erl @@ -0,0 +1,107 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. +%% +-module(inet_tcp_proxy_manager). + +%% The TCP proxies need to decide whether to block based on the node +%% they're running on, and the node connecting to them. 
The trouble
+%% is, they don't have an easy way to determine the latter. Therefore
+%% when A connects to B we register the source port used by A here, so
+%% that B can later look it up and find out who A is without having to
+%% sniff the distribution protocol.
+%%
+%% That does unfortunately mean that we need a central control
+%% thing. We assume here it's running on the node designated by the
+%% 'inet_tcp_proxy_manager_node' kernel application environment variable.
+%%
+%% Yes, this leaks. For its intended lifecycle, that's fine.
+
+-behaviour(gen_server).
+
+-export([start/0, register/5, lookup/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+         code_change/3]).
+
+-define(NODE, ct).
+
+-record(state, {ports, pending}).
+
+start() ->
+    gen_server:start({local, ?MODULE}, ?MODULE, [], []).
+
+register(_From, _To, _SrcPort, Port, Port) ->
+    %% No proxy, don't register
+    ok;
+register(From, To, SrcPort, _Port, _ProxyPort) ->
+    gen_server:call(name(), {register, From, To, SrcPort}, infinity).
+
+lookup(SrcPort) ->
+    gen_server:call(name(), {lookup, SrcPort}, infinity).
+
+controller_node() ->
+    {ok, ManagerNode} = application:get_env(kernel,
+                                            inet_tcp_proxy_manager_node),
+    ManagerNode.
+
+name() ->
+    {?MODULE, controller_node()}.
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+    net_kernel:monitor_nodes(true),
+    {ok, #state{ports   = dict:new(),
+                pending = []}}.
+ +handle_call({register, FromNode, ToNode, SrcPort}, _From, + State = #state{ports = Ports, + pending = Pending}) -> + {Notify, Pending2} = + lists:partition(fun ({P, _}) -> P =:= SrcPort end, Pending), + [gen_server:reply(From, {ok, FromNode, ToNode}) || {_, From} <- Notify], + {reply, ok, + State#state{ports = dict:store(SrcPort, {FromNode, ToNode}, Ports), + pending = Pending2}}; + +handle_call({lookup, SrcPort}, From, + State = #state{ports = Ports, pending = Pending}) -> + case dict:find(SrcPort, Ports) of + {ok, {FromNode, ToNode}} -> + {reply, {ok, FromNode, ToNode}, State}; + error -> + {noreply, State#state{pending = [{SrcPort, From} | Pending]}} + end; + +handle_call(_Req, _From, State) -> + {reply, unknown_request, State}. + +handle_cast(_C, State) -> + {noreply, State}. + +handle_info({nodedown, Node}, State = #state{ports = Ports}) -> + Ports1 = dict:filter( + fun (_, {From, To}) -> + Node =/= From andalso Node =/= To + end, Ports), + {noreply, State#state{ports = Ports1}}; + +handle_info(_I, State) -> + {noreply, State}. + +terminate(_Reason, _State) -> + ok. + +code_change(_, State, _) -> {ok, State}. diff --git a/test/lazy_queue_SUITE.erl b/test/lazy_queue_SUITE.erl new file mode 100644 index 000000000000..fe105cddd011 --- /dev/null +++ b/test/lazy_queue_SUITE.erl @@ -0,0 +1,224 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. 
+%% + +-module(lazy_queue_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile(export_all). + +-define(QNAME, <<"queue.mode.test">>). +-define(MESSAGE_COUNT, 2000). + +all() -> + [ + {group, non_parallel_tests} + ]. + +groups() -> + [ + {non_parallel_tests, [], [ + declare_args, + queue_mode_policy, + publish_consume + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + ClusterSize = 2, + TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_count, ClusterSize}, + {rmq_nodes_clustered, true}, + {rmq_nodename_suffix, Testcase}, + {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps() ++ [ + fun rabbit_ct_broker_helpers:set_ha_policy_all/1 + ]). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. 
+%% ------------------------------------------------------------------- + +declare_args(Config) -> + A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, A), + LQ = <<"lazy-q">>, + declare(Ch, LQ, [{<<"x-queue-mode">>, longstr, <<"lazy">>}]), + assert_queue_mode(A, LQ, lazy), + + DQ = <<"default-q">>, + declare(Ch, DQ, [{<<"x-queue-mode">>, longstr, <<"default">>}]), + assert_queue_mode(A, DQ, default), + + DQ2 = <<"default-q2">>, + declare(Ch, DQ2), + assert_queue_mode(A, DQ2, default), + + passed. + +queue_mode_policy(Config) -> + A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + + set_ha_mode_policy(Config, A, <<"lazy">>), + + Ch = rabbit_ct_client_helpers:open_channel(Config, A), + + LQ = <<"lazy-q">>, + declare(Ch, LQ, [{<<"x-queue-mode">>, longstr, <<"lazy">>}]), + assert_queue_mode(A, LQ, lazy), + + LQ2 = <<"lazy-q-2">>, + declare(Ch, LQ2), + assert_queue_mode(A, LQ2, lazy), + + DQ = <<"default-q">>, + declare(Ch, DQ, [{<<"x-queue-mode">>, longstr, <<"default">>}]), + assert_queue_mode(A, DQ, default), + + set_ha_mode_policy(Config, A, <<"default">>), + + ok = wait_for_queue_mode(A, LQ, lazy, 5000), + ok = wait_for_queue_mode(A, LQ2, default, 5000), + ok = wait_for_queue_mode(A, DQ, default, 5000), + + passed. 
+ +publish_consume(Config) -> + A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, A), + declare(Ch, ?QNAME), + + rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), + consume(Ch, ?QNAME, ack), + [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)], + + set_ha_mode_policy(Config, A, <<"lazy">>), + rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), + rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), + [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)], + + set_ha_mode_policy(Config, A, <<"default">>), + [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)], + + rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), + set_ha_mode_policy(Config, A, <<"lazy">>), + rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), + set_ha_mode_policy(Config, A, <<"default">>), + [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)], + + set_ha_mode_policy(Config, A, <<"lazy">>), + [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)], + + cancel(Ch), + + passed. + +%%---------------------------------------------------------------------------- + +declare(Ch, Q) -> + declare(Ch, Q, []). + +declare(Ch, Q, Args) -> + amqp_channel:call(Ch, #'queue.declare'{queue = Q, + durable = true, + arguments = Args}). + +consume(Ch, Q, Ack) -> + amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, + no_ack = Ack =:= no_ack, + consumer_tag = <<"ctag">>}, + self()), + receive + #'basic.consume_ok'{consumer_tag = <<"ctag">>} -> + ok + end. + +cancel(Ch) -> + amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = <<"ctag">>}). + +assert_delivered(Ch, Ack, Payload) -> + PBin = payload2bin(Payload), + receive + {#'basic.deliver'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} -> + PBin = PBin2, + maybe_ack(Ch, Ack, DTag) + end. 
+ +maybe_ack(Ch, do_ack, DTag) -> + amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}), + DTag; +maybe_ack(_Ch, _, DTag) -> + DTag. + +payload2bin(Int) -> list_to_binary(integer_to_list(Int)). + +set_ha_mode_policy(Config, Node, Mode) -> + ok = rabbit_ct_broker_helpers:set_ha_policy(Config, Node, <<".*">>, <<"all">>, + [{<<"queue-mode">>, Mode}]). + + +wait_for_queue_mode(_Node, _Q, _Mode, Max) when Max < 0 -> + fail; +wait_for_queue_mode(Node, Q, Mode, Max) -> + case get_queue_mode(Node, Q) of + Mode -> ok; + _ -> timer:sleep(100), + wait_for_queue_mode(Node, Q, Mode, Max - 100) + end. + +assert_queue_mode(Node, Q, Expected) -> + Actual = get_queue_mode(Node, Q), + Expected = Actual. + +get_queue_mode(Node, Q) -> + QNameRes = rabbit_misc:r(<<"/">>, queue, Q), + {ok, AMQQueue} = + rpc:call(Node, rabbit_amqqueue, lookup, [QNameRes]), + [{backing_queue_status, Status}] = + rpc:call(Node, rabbit_amqqueue, info, + [AMQQueue, [backing_queue_status]]), + proplists:get_value(mode, Status). diff --git a/test/many_node_ha_SUITE.erl b/test/many_node_ha_SUITE.erl new file mode 100644 index 000000000000..22b39e7a3d37 --- /dev/null +++ b/test/many_node_ha_SUITE.erl @@ -0,0 +1,117 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(many_node_ha_SUITE). + +-include_lib("common_test/include/ct.hrl"). 
+-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile(export_all). + +all() -> + [ + {group, cluster_size_6} + ]. + +groups() -> + [ + {cluster_size_6, [], [ + kill_intermediate + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(cluster_size_6, Config) -> + rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_count, 6} + ]). + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + ClusterSize = ?config(rmq_nodes_count, Config), + TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_clustered, true}, + {rmq_nodename_suffix, Testcase}, + {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps() ++ [ + fun rabbit_ct_broker_helpers:set_ha_policy_all/1 + ]). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. 
+%% ------------------------------------------------------------------- + +kill_intermediate(Config) -> + [A, B, C, D, E, F] = rabbit_ct_broker_helpers:get_node_configs(Config, + nodename), + Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000), + MasterChannel = rabbit_ct_client_helpers:open_channel(Config, A), + ConsumerChannel = rabbit_ct_client_helpers:open_channel(Config, E), + ProducerChannel = rabbit_ct_client_helpers:open_channel(Config, F), + Queue = <<"test">>, + amqp_channel:call(MasterChannel, #'queue.declare'{queue = Queue, + auto_delete = false}), + + %% TODO: this seems *highly* timing dependant - the assumption being + %% that the kill will work quickly enough that there will still be + %% some messages in-flight that we *must* receive despite the intervening + %% node deaths. It would be nice if we could find a means to do this + %% in a way that is not actually timing dependent. + + %% Worse still, it assumes that killing the master will cause a + %% failover to Slave1, and so on. Nope. + + ConsumerPid = rabbit_ha_test_consumer:create(ConsumerChannel, + Queue, self(), false, Msgs), + + ProducerPid = rabbit_ha_test_producer:create(ProducerChannel, + Queue, self(), false, Msgs), + + %% create a killer for the master and the first 3 slaves + [rabbit_ct_broker_helpers:kill_node_after(Config, Node, Time) || + {Node, Time} <- [{A, 50}, + {B, 50}, + {C, 100}, + {D, 100}]], + + %% verify that the consumer got all msgs, or die, or time out + rabbit_ha_test_producer:await_response(ProducerPid), + rabbit_ha_test_consumer:await_response(ConsumerPid), + ok. + diff --git a/test/partitions_SUITE.erl b/test/partitions_SUITE.erl new file mode 100644 index 000000000000..b93e1ea9dd3f --- /dev/null +++ b/test/partitions_SUITE.erl @@ -0,0 +1,438 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. 
You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2011-2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(partitions_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile(export_all). + +%% We set ticktime to 1s and setuptime is 7s so to make sure it +%% passes... +-define(DELAY, 8000). + +all() -> + [ + {group, net_ticktime_1}, + {group, net_ticktime_10} + ]. + +groups() -> + [ + {net_ticktime_1, [], [ + {cluster_size_2, [], [ + ctl_ticktime_sync, + prompt_disconnect_detection + ]}, + {cluster_size_3, [], [ + autoheal, + autoheal_after_pause_if_all_down, + ignore, + pause_if_all_down_on_blocked, + pause_if_all_down_on_down, + pause_minority_on_blocked, + pause_minority_on_down, + partial_false_positive, + partial_to_full, + partial_pause_minority, + partial_pause_if_all_down + ]} + ]}, + {net_ticktime_10, [], [ + {cluster_size_2, [], [ + pause_if_all_down_false_promises_mirrored, + pause_if_all_down_false_promises_unmirrored, + pause_minority_false_promises_mirrored, + pause_minority_false_promises_unmirrored + ]} + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config, [ + fun enable_dist_proxy_manager/1 + ]). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). 
+ +init_per_group(net_ticktime_1, Config) -> + rabbit_ct_helpers:set_config(Config, [{net_ticktime, 1}]); +init_per_group(net_ticktime_10, Config) -> + rabbit_ct_helpers:set_config(Config, [{net_ticktime, 10}]); +init_per_group(cluster_size_2, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]); +init_per_group(cluster_size_3, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]). + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + ClusterSize = ?config(rmq_nodes_count, Config), + TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_clustered, false}, + {rmq_nodename_suffix, Testcase}, + {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps() ++ [ + fun enable_dist_proxy/1, + fun rabbit_ct_broker_helpers:cluster_nodes/1 + ]). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +enable_dist_proxy_manager(Config) -> + inet_tcp_proxy_manager:start(), + rabbit_ct_helpers:set_config(Config, + {erlang_dist_module, inet_proxy_dist}). + +enable_dist_proxy(Config) -> + NodeConfigs = rabbit_ct_broker_helpers:get_node_configs(Config), + Nodes = [?config(nodename, NodeConfig) || NodeConfig <- NodeConfigs], + ManagerNode = node(), + ok = lists:foreach( + fun(NodeConfig) -> + ok = rabbit_ct_broker_helpers:rpc(Config, + ?config(nodename, NodeConfig), + ?MODULE, enable_dist_proxy_on_node, + [NodeConfig, ManagerNode, Nodes]) + end, NodeConfigs), + Config. 
+ +enable_dist_proxy_on_node(NodeConfig, ManagerNode, Nodes) -> + Nodename = ?config(nodename, NodeConfig), + DistPort = ?config(tcp_port_erlang_dist, NodeConfig), + ProxyPort = ?config(tcp_port_erlang_dist_proxy, NodeConfig), + ok = inet_tcp_proxy:start(ManagerNode, DistPort, ProxyPort), + ok = inet_tcp_proxy:reconnect(Nodes -- [Nodename]). + +%% ------------------------------------------------------------------- +%% Testcases. +%% ------------------------------------------------------------------- + +ignore(Config) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + block_unblock([{A, B}, {A, C}]), + timer:sleep(?DELAY), + [B, C] = partitions(A), + [A] = partitions(B), + [A] = partitions(C), + ok. + +pause_minority_on_down(Config) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + set_mode(Config, pause_minority), + + true = is_running(A), + + rabbit_ct_broker_helpers:kill_node(Config, B), + timer:sleep(?DELAY), + true = is_running(A), + + rabbit_ct_broker_helpers:kill_node(Config, C), + await_running(A, false), + ok. + +pause_minority_on_blocked(Config) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + set_mode(Config, pause_minority), + pause_on_blocked(A, B, C). + +pause_if_all_down_on_down(Config) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + set_mode(Config, {pause_if_all_down, [C], ignore}), + [(true = is_running(N)) || N <- [A, B, C]], + + rabbit_ct_broker_helpers:kill_node(Config, B), + timer:sleep(?DELAY), + [(true = is_running(N)) || N <- [A, C]], + + rabbit_ct_broker_helpers:kill_node(Config, C), + timer:sleep(?DELAY), + await_running(A, false), + ok. + +pause_if_all_down_on_blocked(Config) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + set_mode(Config, {pause_if_all_down, [C], ignore}), + pause_on_blocked(A, B, C). 
+ +pause_on_blocked(A, B, C) -> + [(true = is_running(N)) || N <- [A, B, C]], + block([{A, B}, {A, C}]), + await_running(A, false), + [await_running(N, true) || N <- [B, C]], + unblock([{A, B}, {A, C}]), + [await_running(N, true) || N <- [A, B, C]], + Status = rpc:call(B, rabbit_mnesia, status, []), + [] = rabbit_misc:pget(partitions, Status), + ok. + +%%% Make sure we do not confirm any messages after a partition has +%%% happened but before we pause, since any such confirmations would be +%%% lies. +%%% +%%% This test has to use an AB cluster (not ABC) since GM ends up +%%% taking longer to detect down slaves when there are more nodes and +%%% we close the window by mistake. +%%% +%%% In general there are quite a few ways to accidentally cause this +%%% test to pass since there are a lot of things in the broker that can +%%% suddenly take several seconds to time out when TCP connections +%%% won't establish. + +pause_minority_false_promises_mirrored(Config) -> + rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<".*">>, <<"all">>), + pause_false_promises(Config, pause_minority). + +pause_minority_false_promises_unmirrored(Config) -> + pause_false_promises(Config, pause_minority). + +pause_if_all_down_false_promises_mirrored(Config) -> + rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<".*">>, <<"all">>), + B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename), + pause_false_promises(Config, {pause_if_all_down, [B], ignore}). + +pause_if_all_down_false_promises_unmirrored(Config) -> + B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename), + pause_false_promises(Config, {pause_if_all_down, [B], ignore}). 
+ +pause_false_promises(Config, ClusterPartitionHandling) -> + [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + set_mode(Config, [A], ClusterPartitionHandling), + ChA = rabbit_ct_client_helpers:open_channel(Config, A), + ChB = rabbit_ct_client_helpers:open_channel(Config, B), + amqp_channel:call(ChB, #'queue.declare'{queue = <<"test">>, + durable = true}), + amqp_channel:call(ChA, #'confirm.select'{}), + amqp_channel:register_confirm_handler(ChA, self()), + + %% Cause a partition after 1s + Self = self(), + spawn_link(fun () -> + timer:sleep(1000), + %%io:format(user, "~p BLOCK~n", [calendar:local_time()]), + block([{A, B}]), + unlink(Self) + end), + + %% Publish large no of messages, see how many we get confirmed + [amqp_channel:cast(ChA, #'basic.publish'{routing_key = <<"test">>}, + #amqp_msg{props = #'P_basic'{delivery_mode = 1}}) || + _ <- lists:seq(1, 100000)], + %%io:format(user, "~p finish publish~n", [calendar:local_time()]), + + %% Time for the partition to be detected. We don't put this sleep + %% in receive_acks since otherwise we'd have another similar sleep + %% at the end. + timer:sleep(30000), + Confirmed = receive_acks(0), + %%io:format(user, "~p got acks~n", [calendar:local_time()]), + await_running(A, false), + %%io:format(user, "~p A stopped~n", [calendar:local_time()]), + + unblock([{A, B}]), + await_running(A, true), + + %% But how many made it onto the rest of the cluster? + #'queue.declare_ok'{message_count = Survived} = + amqp_channel:call(ChB, #'queue.declare'{queue = <<"test">>, + durable = true}), + %%io:format(user, "~p queue declared~n", [calendar:local_time()]), + case Confirmed > Survived of + true -> io:format("Confirmed=~p Survived=~p~n", [Confirmed, Survived]); + false -> ok + end, + true = (Confirmed =< Survived), + + rabbit_ct_client_helpers:close_channel(ChB), + rabbit_ct_client_helpers:close_channel(ChA), + ok. 
+ +receive_acks(Max) -> + receive + #'basic.ack'{delivery_tag = DTag} -> + receive_acks(DTag) + after ?DELAY -> + Max + end. + +prompt_disconnect_detection(Config) -> + [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + ChB = rabbit_ct_client_helpers:open_channel(Config, B), + [amqp_channel:call(ChB, #'queue.declare'{}) || _ <- lists:seq(1, 100)], + block([{A, B}]), + timer:sleep(?DELAY), + %% We want to make sure we do not end up waiting for setuptime * + %% no of queues. Unfortunately that means we need a timeout... + [] = rabbit_ct_broker_helpers:rpc(Config, A, + rabbit_amqqueue, info_all, [<<"/">>], ?DELAY), + rabbit_ct_client_helpers:close_channel(ChB), + ok. + +ctl_ticktime_sync(Config) -> + %% Server has 1s net_ticktime, make sure ctl doesn't get disconnected + Cmd = ["eval", "timer:sleep(5000)."], + {ok, "ok\n"} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, Cmd). + +%% NB: we test full and partial partitions here. +autoheal(Config) -> + set_mode(Config, autoheal), + do_autoheal(Config). + +autoheal_after_pause_if_all_down(Config) -> + [_, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + set_mode(Config, {pause_if_all_down, [B, C], autoheal}), + do_autoheal(Config). + +do_autoheal(Config) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Test = fun (Pairs) -> + block_unblock(Pairs), + %% Sleep to make sure all the partitions are noticed + %% ?DELAY for the net_tick timeout + timer:sleep(?DELAY), + [await_listening(N, true) || N <- [A, B, C]], + [await_partitions(N, []) || N <- [A, B, C]] + end, + Test([{B, C}]), + Test([{A, C}, {B, C}]), + Test([{A, B}, {A, C}, {B, C}]), + ok. + +partial_false_positive(Config) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + block([{A, B}]), + timer:sleep(1000), + block([{A, C}]), + timer:sleep(?DELAY), + unblock([{A, B}, {A, C}]), + timer:sleep(?DELAY), + %% When B times out A's connection, it will check with C. 
C will + %% not have timed out A yet, but already it can't talk to it. We + %% need to not consider this a partial partition; B and C should + %% still talk to each other. + [B, C] = partitions(A), + [A] = partitions(B), + [A] = partitions(C), + ok. + +partial_to_full(Config) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + block_unblock([{A, B}]), + timer:sleep(?DELAY), + %% There are several valid ways this could go, depending on how + %% the DOWN messages race: either A gets disconnected first and BC + %% stay together, or B gets disconnected first and AC stay + %% together, or both make it through and all three get + %% disconnected. + case {partitions(A), partitions(B), partitions(C)} of + {[B, C], [A], [A]} -> ok; + {[B], [A, C], [B]} -> ok; + {[B, C], [A, C], [A, B]} -> ok; + Partitions -> exit({partitions, Partitions}) + end. + +partial_pause_minority(Config) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + set_mode(Config, pause_minority), + block([{A, B}]), + [await_running(N, false) || N <- [A, B]], + await_running(C, true), + unblock([{A, B}]), + [await_listening(N, true) || N <- [A, B, C]], + [await_partitions(N, []) || N <- [A, B, C]], + ok. + +partial_pause_if_all_down(Config) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + set_mode(Config, {pause_if_all_down, [B], ignore}), + block([{A, B}]), + await_running(A, false), + [await_running(N, true) || N <- [B, C]], + unblock([{A, B}]), + [await_listening(N, true) || N <- [A, B, C]], + [await_partitions(N, []) || N <- [A, B, C]], + ok. + +set_mode(Config, Mode) -> + rabbit_ct_broker_helpers:rpc_all(Config, + application, set_env, [rabbit, cluster_partition_handling, Mode]). + +set_mode(Config, Nodes, Mode) -> + rabbit_ct_broker_helpers:rpc(Config, Nodes, + application, set_env, [rabbit, cluster_partition_handling, Mode]). + +block_unblock(Pairs) -> + block(Pairs), + timer:sleep(?DELAY), + unblock(Pairs). 
+ +block(Pairs) -> [block(X, Y) || {X, Y} <- Pairs]. +unblock(Pairs) -> [allow(X, Y) || {X, Y} <- Pairs]. + +partitions(Node) -> + case rpc:call(Node, rabbit_node_monitor, partitions, []) of + {badrpc, {'EXIT', E}} = R -> case rabbit_misc:is_abnormal_exit(E) of + true -> R; + false -> timer:sleep(1000), + partitions(Node) + end; + Partitions -> Partitions + end. + +block(X, Y) -> + rpc:call(X, inet_tcp_proxy, block, [Y]), + rpc:call(Y, inet_tcp_proxy, block, [X]). + +allow(X, Y) -> + rpc:call(X, inet_tcp_proxy, allow, [Y]), + rpc:call(Y, inet_tcp_proxy, allow, [X]). + +await_running (Node, Bool) -> await(Node, Bool, fun is_running/1). +await_listening (Node, Bool) -> await(Node, Bool, fun is_listening/1). +await_partitions(Node, Parts) -> await(Node, Parts, fun partitions/1). + +await(Node, Res, Fun) -> + case Fun(Node) of + Res -> ok; + _ -> timer:sleep(100), + await(Node, Res, Fun) + end. + +is_running(Node) -> rpc:call(Node, rabbit, is_running, []). + +is_listening(Node) -> + case rpc:call(Node, rabbit_networking, node_listeners, [Node]) of + [] -> false; + [_|_] -> true; + _ -> false + end. diff --git a/test/priority_queue_SUITE.erl b/test/priority_queue_SUITE.erl new file mode 100644 index 000000000000..5df568609079 --- /dev/null +++ b/test/priority_queue_SUITE.erl @@ -0,0 +1,558 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2011-2016 Pivotal Software, Inc. All rights reserved. 
+%% + +-module(priority_queue_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile(export_all). + +all() -> + [ + {group, cluster_size_2}, + {group, cluster_size_3} + ]. + +groups() -> + [ + {cluster_size_2, [], [ + {parallel_tests, [parallel], [ + ackfold, + drop, + dropwhile_fetchwhile, + info_head_message_timestamp, + matching, + mirror_queue_sync, + mirror_queue_sync_priority_above_max, + mirror_queue_sync_priority_above_max_pending_ack, + purge, + requeue, + resume, + simple_order, + straight_through + ]}, + {non_parallel_tests, [], [ + recovery %% Restart RabbitMQ. + ]} + ]}, + {cluster_size_3, [], [ + {parallel_tests, [parallel], [ + mirror_queue_auto_ack + ]} + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(cluster_size_2, Config) -> + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_count, 2} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()); +init_per_group(cluster_size_3, Config) -> + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_count, 3} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()); +init_per_group(_, Config) -> + Config. + +end_per_group(ClusterSizeGroup, Config) +when ClusterSizeGroup =:= cluster_size_2 +orelse ClusterSizeGroup =:= cluster_size_3 -> + rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()); +end_per_group(_, Config) -> + Config. 
+ +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. +%% ------------------------------------------------------------------- + +%% The BQ API is used in all sorts of places in all sorts of +%% ways. Therefore we have to jump through a few different hoops +%% in order to integration-test it. +%% +%% * start/1, stop/0, init/3, terminate/2, delete_and_terminate/2 +%% - starting and stopping rabbit. durable queues / persistent msgs needed +%% to test recovery +%% +%% * publish/5, drain_confirmed/1, fetch/2, ack/2, is_duplicate/2, msg_rates/1, +%% needs_timeout/1, timeout/1, invoke/3, resume/1 [0] +%% - regular publishing and consuming, with confirms and acks and durability +%% +%% * publish_delivered/4 - publish with acks straight through +%% * discard/3 - publish without acks straight through +%% * dropwhile/2 - expire messages without DLX +%% * fetchwhile/4 - expire messages with DLX +%% * ackfold/4 - reject messages with DLX +%% * requeue/2 - reject messages without DLX +%% * drop/2 - maxlen messages without DLX +%% * purge/1 - issue AMQP queue.purge +%% * purge_acks/1 - mirror queue explicit sync with unacked msgs +%% * fold/3 - mirror queue explicit sync +%% * depth/1 - mirror queue implicit sync detection +%% * len/1, is_empty/1 - info items +%% * handle_pre_hibernate/1 - hibernation +%% +%% * set_ram_duration_target/2, ram_duration/1, status/1 +%% - maybe need unit testing? +%% +%% [0] publish enough to get credit flow from msg store + +recovery(Config) -> + {Conn, Ch} = open(Config), + Q = <<"recovery-queue">>, + declare(Ch, Q, 3), + publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]), + amqp_connection:close(Conn), + + %% TODO This terminates the automatically open connection and breaks + %% coverage. 
+ rabbit_ct_broker_helpers:restart_broker(Config, 0), + + {Conn2, Ch2} = open(Config), + get_all(Ch2, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]), + delete(Ch2, Q), + amqp_connection:close(Conn2), + passed. + +simple_order(Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + Q = <<"simple_order-queue">>, + declare(Ch, Q, 3), + publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]), + get_all(Ch, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]), + publish(Ch, Q, [2, 3, 1, 2, 3, 1, 2, 3, 1]), + get_all(Ch, Q, no_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]), + publish(Ch, Q, [3, 1, 2, 3, 1, 2, 3, 1, 2]), + get_all(Ch, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]), + delete(Ch, Q), + rabbit_ct_client_helpers:close_channel(Ch), + passed. + +matching(Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + Q = <<"matching-queue">>, + declare(Ch, Q, 5), + %% We round priority down, and 0 is the default + publish(Ch, Q, [undefined, 0, 5, 10, undefined]), + get_all(Ch, Q, do_ack, [5, 10, undefined, 0, undefined]), + delete(Ch, Q), + rabbit_ct_client_helpers:close_channel(Ch), + passed. + +resume(Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + Q = <<"resume-queue">>, + declare(Ch, Q, 5), + amqp_channel:call(Ch, #'confirm.select'{}), + publish_many(Ch, Q, 10000), + amqp_channel:wait_for_confirms(Ch), + amqp_channel:call(Ch, #'queue.purge'{queue = Q}), %% Assert it exists + delete(Ch, Q), + rabbit_ct_client_helpers:close_channel(Ch), + passed. + +straight_through(Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + Q = <<"straight_through-queue">>, + declare(Ch, Q, 3), + [begin + consume(Ch, Q, Ack), + [begin + publish1(Ch, Q, P), + assert_delivered(Ch, Ack, P) + end || P <- [1, 2, 3]], + cancel(Ch) + end || Ack <- [do_ack, no_ack]], + get_empty(Ch, Q), + delete(Ch, Q), + rabbit_ct_client_helpers:close_channel(Ch), + passed. 
+ +dropwhile_fetchwhile(Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + Q = <<"dropwhile_fetchwhile-queue">>, + [begin + declare(Ch, Q, Args ++ arguments(3)), + publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]), + timer:sleep(10), + get_empty(Ch, Q), + delete(Ch, Q) + end || + Args <- [[{<<"x-message-ttl">>, long, 1}], + [{<<"x-message-ttl">>, long, 1}, + {<<"x-dead-letter-exchange">>, longstr, <<"amq.fanout">>}] + ]], + rabbit_ct_client_helpers:close_channel(Ch), + passed. + +ackfold(Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + Q = <<"ackfolq-queue1">>, + Q2 = <<"ackfold-queue2">>, + declare(Ch, Q, + [{<<"x-dead-letter-exchange">>, longstr, <<>>}, + {<<"x-dead-letter-routing-key">>, longstr, Q2} + | arguments(3)]), + declare(Ch, Q2, none), + publish(Ch, Q, [1, 2, 3]), + [_, _, DTag] = get_all(Ch, Q, manual_ack, [3, 2, 1]), + amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag, + multiple = true, + requeue = false}), + timer:sleep(100), + get_all(Ch, Q2, do_ack, [3, 2, 1]), + delete(Ch, Q), + delete(Ch, Q2), + rabbit_ct_client_helpers:close_channel(Ch), + passed. + +requeue(Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + Q = <<"requeue-queue">>, + declare(Ch, Q, 3), + publish(Ch, Q, [1, 2, 3]), + [_, _, DTag] = get_all(Ch, Q, manual_ack, [3, 2, 1]), + amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag, + multiple = true, + requeue = true}), + get_all(Ch, Q, do_ack, [3, 2, 1]), + delete(Ch, Q), + rabbit_ct_client_helpers:close_channel(Ch), + passed. + +drop(Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + Q = <<"drop-queue">>, + declare(Ch, Q, [{<<"x-max-length">>, long, 4} | arguments(3)]), + publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]), + %% We drop from the head, so this is according to the "spec" even + %% if not likely to be what the user wants. + get_all(Ch, Q, do_ack, [2, 1, 1, 1]), + delete(Ch, Q), + rabbit_ct_client_helpers:close_channel(Ch), + passed. 
+ +purge(Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + Q = <<"purge-queue">>, + declare(Ch, Q, 3), + publish(Ch, Q, [1, 2, 3]), + amqp_channel:call(Ch, #'queue.purge'{queue = Q}), + get_empty(Ch, Q), + delete(Ch, Q), + rabbit_ct_client_helpers:close_channel(Ch), + passed. + +info_head_message_timestamp(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, info_head_message_timestamp1, [Config]). + +info_head_message_timestamp1(_Config) -> + QName = rabbit_misc:r(<<"/">>, queue, + <<"info_head_message_timestamp-queue">>), + Q0 = rabbit_amqqueue:pseudo_queue(QName, self()), + Q = Q0#amqqueue{arguments = [{<<"x-max-priority">>, long, 2}]}, + PQ = rabbit_priority_queue, + BQS1 = PQ:init(Q, new, fun(_, _) -> ok end), + %% The queue is empty: no timestamp. + true = PQ:is_empty(BQS1), + '' = PQ:info(head_message_timestamp, BQS1), + %% Publish one message with timestamp 1000. + Msg1 = #basic_message{ + id = msg1, + content = #content{ + properties = #'P_basic'{ + priority = 1, + timestamp = 1000 + }}, + is_persistent = false + }, + BQS2 = PQ:publish(Msg1, #message_properties{size = 0}, false, self(), + noflow, BQS1), + 1000 = PQ:info(head_message_timestamp, BQS2), + %% Publish a higher priority message with no timestamp. + Msg2 = #basic_message{ + id = msg2, + content = #content{ + properties = #'P_basic'{ + priority = 2 + }}, + is_persistent = false + }, + BQS3 = PQ:publish(Msg2, #message_properties{size = 0}, false, self(), + noflow, BQS2), + '' = PQ:info(head_message_timestamp, BQS3), + %% Consume message with no timestamp. + {{Msg2, _, _}, BQS4} = PQ:fetch(false, BQS3), + 1000 = PQ:info(head_message_timestamp, BQS4), + %% Consume message with timestamp 1000, but do not acknowledge it + %% yet. The goal is to verify that the unacknowledged message's + %% timestamp is returned. + {{Msg1, _, AckTag}, BQS5} = PQ:fetch(true, BQS4), + 1000 = PQ:info(head_message_timestamp, BQS5), + %% Ack message. The queue is empty now. 
+ {[msg1], BQS6} = PQ:ack([AckTag], BQS5), + true = PQ:is_empty(BQS6), + '' = PQ:info(head_message_timestamp, BQS6), + PQ:delete_and_terminate(a_whim, BQS6), + passed. + +ram_duration(_Config) -> + QName = rabbit_misc:r(<<"/">>, queue, <<"ram_duration-queue">>), + Q0 = rabbit_amqqueue:pseudo_queue(QName, self()), + Q = Q0#amqqueue{arguments = [{<<"x-max-priority">>, long, 5}]}, + PQ = rabbit_priority_queue, + BQS1 = PQ:init(Q, new, fun(_, _) -> ok end), + {_Duration1, BQS2} = PQ:ram_duration(BQS1), + BQS3 = PQ:set_ram_duration_target(infinity, BQS2), + BQS4 = PQ:set_ram_duration_target(1, BQS3), + {_Duration2, BQS5} = PQ:ram_duration(BQS4), + PQ:delete_and_terminate(a_whim, BQS5), + passed. + +mirror_queue_sync(Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + Q = <<"mirror_queue_sync-queue">>, + declare(Ch, Q, 3), + publish(Ch, Q, [1, 2, 3]), + ok = rabbit_ct_broker_helpers:set_ha_policy(Config, 0, + <<"^mirror_queue_sync-queue$">>, <<"all">>), + publish(Ch, Q, [1, 2, 3, 1, 2, 3]), + %% master now has 9, slave 6. + get_partial(Ch, Q, manual_ack, [3, 3, 3, 2, 2, 2]), + %% So some but not all are unacked at the slave + Nodename0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + rabbit_ct_broker_helpers:control_action(sync_queue, Nodename0, + [binary_to_list(Q)], [{"-p", "/"}]), + wait_for_sync(Config, Nodename0, rabbit_misc:r(<<"/">>, queue, Q)), + passed. + +mirror_queue_sync_priority_above_max(Config) -> + A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + %% Tests synchronisation of slaves when priority is higher than max priority. 
+    %% This causes an infinite loop (and test timeout) before rabbitmq-server-795
+    Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+    Q = <<"mirror_queue_sync_priority_above_max-queue">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [5, 5, 5]),
+    ok = rabbit_ct_broker_helpers:set_ha_policy(Config, A,
+      <<".*">>, <<"all">>),
+    rabbit_ct_broker_helpers:control_action(sync_queue, A,
+      [binary_to_list(Q)], [{"-p", "/"}]),
+    wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+    delete(Ch, Q),
+    passed.
+
+mirror_queue_sync_priority_above_max_pending_ack(Config) ->
+    [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    %% Tests synchronisation of slaves when priority is higher than max priority
+    %% and there are pending acks.
+    %% This causes an infinite loop (and test timeout) before rabbitmq-server-795
+    Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+    Q = <<"mirror_queue_sync_priority_above_max_pending_ack-queue">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [5, 5, 5]),
+    %% Consume but 'forget' to acknowledge
+    get_without_ack(Ch, Q),
+    get_without_ack(Ch, Q),
+    ok = rabbit_ct_broker_helpers:set_ha_policy(Config, A,
+      <<".*">>, <<"all">>),
+    rabbit_ct_broker_helpers:control_action(sync_queue, A,
+      [binary_to_list(Q)], [{"-p", "/"}]),
+    wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+    synced_msgs(Config, A, rabbit_misc:r(<<"/">>, queue, Q), 3),
+    synced_msgs(Config, B, rabbit_misc:r(<<"/">>, queue, Q), 3),
+    delete(Ch, Q),
+    passed.
+
+mirror_queue_auto_ack(Config) ->
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    %% Check correct use of AckRequired in the notifications to the slaves.
+    %% If slaves are notified with AckRequired == true when it is false,
+    %% the slaves will crash with the depth notification as they will not
+    %% match the master delta.
+ %% Bug rabbitmq-server 687 + Ch = rabbit_ct_client_helpers:open_channel(Config, A), + Q = <<"mirror_queue_auto_ack-queue">>, + declare(Ch, Q, 3), + publish(Ch, Q, [1, 2, 3]), + ok = rabbit_ct_broker_helpers:set_ha_policy(Config, A, + <<".*">>, <<"all">>), + get_partial(Ch, Q, no_ack, [3, 2, 1]), + + %% Retrieve slaves + SPids = slave_pids(Config, A, rabbit_misc:r(<<"/">>, queue, Q)), + [{SNode1, _SPid1}, {SNode2, SPid2}] = nodes_and_pids(SPids), + + %% Restart one of the slaves so `request_depth` is triggered + rabbit_ct_broker_helpers:restart_node(Config, SNode1), + + %% The alive slave must have the same pid after its neighbour is restarted + timer:sleep(3000), %% ugly but we can't know when the `depth` instruction arrives + Slaves = nodes_and_pids(slave_pids(Config, A, rabbit_misc:r(<<"/">>, queue, Q))), + SPid2 = proplists:get_value(SNode2, Slaves), + + delete(Ch, Q), + passed. + +%%---------------------------------------------------------------------------- + +open(Config) -> + Conn = rabbit_ct_client_helpers:open_connection(Config, 0), + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + {Conn, Ch}. + +declare(Ch, Q, Args) when is_list(Args) -> + amqp_channel:call(Ch, #'queue.declare'{queue = Q, + durable = true, + arguments = Args}); +declare(Ch, Q, Max) -> + declare(Ch, Q, arguments(Max)). + +delete(Ch, Q) -> + amqp_channel:call(Ch, #'queue.delete'{queue = Q}). + +publish(Ch, Q, Ps) -> + amqp_channel:call(Ch, #'confirm.select'{}), + [publish1(Ch, Q, P) || P <- Ps], + amqp_channel:wait_for_confirms(Ch). + +publish_many(_Ch, _Q, 0) -> ok; +publish_many( Ch, Q, N) -> publish1(Ch, Q, random:uniform(5)), + publish_many(Ch, Q, N - 1). + +publish1(Ch, Q, P) -> + amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q}, + #amqp_msg{props = props(P), + payload = priority2bin(P)}). + +props(undefined) -> #'P_basic'{delivery_mode = 2}; +props(P) -> #'P_basic'{priority = P, + delivery_mode = 2}. 
+ +consume(Ch, Q, Ack) -> + amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, + no_ack = Ack =:= no_ack, + consumer_tag = <<"ctag">>}, + self()), + receive + #'basic.consume_ok'{consumer_tag = <<"ctag">>} -> + ok + end. + +cancel(Ch) -> + amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = <<"ctag">>}). + +assert_delivered(Ch, Ack, P) -> + PBin = priority2bin(P), + receive + {#'basic.deliver'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} -> + PBin = PBin2, + maybe_ack(Ch, Ack, DTag) + end. + +get_all(Ch, Q, Ack, Ps) -> + DTags = get_partial(Ch, Q, Ack, Ps), + get_empty(Ch, Q), + DTags. + +get_partial(Ch, Q, Ack, Ps) -> + [get_ok(Ch, Q, Ack, P) || P <- Ps]. + +get_empty(Ch, Q) -> + #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = Q}). + +get_ok(Ch, Q, Ack, P) -> + PBin = priority2bin(P), + {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} = + amqp_channel:call(Ch, #'basic.get'{queue = Q, + no_ack = Ack =:= no_ack}), + PBin = PBin2, + maybe_ack(Ch, Ack, DTag). + +get_without_ack(Ch, Q) -> + {#'basic.get_ok'{}, _} = + amqp_channel:call(Ch, #'basic.get'{queue = Q, no_ack = false}). + +maybe_ack(Ch, do_ack, DTag) -> + amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}), + DTag; +maybe_ack(_Ch, _, DTag) -> + DTag. + +arguments(none) -> []; +arguments(Max) -> [{<<"x-max-priority">>, byte, Max}]. + +priority2bin(undefined) -> <<"undefined">>; +priority2bin(Int) -> list_to_binary(integer_to_list(Int)). + +%%---------------------------------------------------------------------------- + +wait_for_sync(Config, Nodename, Q) -> + case synced(Config, Nodename, Q) of + true -> ok; + false -> timer:sleep(100), + wait_for_sync(Config, Nodename, Q) + end. + +synced(Config, Nodename, Q) -> + Info = rabbit_ct_broker_helpers:rpc(Config, Nodename, + rabbit_amqqueue, info_all, [<<"/">>, [name, synchronised_slave_pids]]), + [SSPids] = [Pids || [{name, Q1}, {synchronised_slave_pids, Pids}] <- Info, + Q =:= Q1], + length(SSPids) =:= 1. 
+
+synced_msgs(Config, Nodename, Q, Expected) ->
+    Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
+      rabbit_amqqueue, info_all, [<<"/">>, [name, messages]]),
+    [M] = [M || [{name, Q1}, {messages, M}] <- Info, Q =:= Q1],
+    M =:= Expected.
+
+nodes_and_pids(SPids) ->
+    lists:zip([node(S) || S <- SPids], SPids).
+
+slave_pids(Config, Nodename, Q) ->
+    Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
+      rabbit_amqqueue, info_all, [<<"/">>, [name, slave_pids]]),
+    [SPids] = [SPids || [{name, Q1}, {slave_pids, SPids}] <- Info,
+                        Q =:= Q1],
+    SPids.
+
+%%----------------------------------------------------------------------------
diff --git a/test/queue_master_location_SUITE.erl b/test/queue_master_location_SUITE.erl
new file mode 100644
index 000000000000..e77f27f14b73
--- /dev/null
+++ b/test/queue_master_location_SUITE.erl
@@ -0,0 +1,271 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(queue_master_location_SUITE).
+
+%% These tests use an ABC cluster with each node initialised with
+%% a different number of queues. When a queue is declared, different
+%% strategies can be applied to determine the queue's master node. Queue
+%% location strategies can be applied in the following ways;
+%%   1. As policy,
+%%   2. As config (in rabbitmq.config),
+%%   3. or as part of the queue's declare arguments.
+%% +%% Currently supported strategies are; +%% min-masters : The queue master node is calculated as the one with the +%% least bound queues in the cluster. +%% client-local: The queue master node is the local node from which +%% the declaration is being carried out from +%% random : The queue master node is randomly selected. +%% + +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile(export_all). + +-define(DEFAULT_VHOST_PATH, (<<"/">>)). +-define(POLICY, <<"^qm.location$">>). + +all() -> + [ + {group, cluster_size_3} + ]. + +groups() -> + [ + {cluster_size_3, [], [ + declare_args, + declare_policy, + declare_config, + calculate_min_master, + calculate_random, + calculate_client_local + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(cluster_size_3, Config) -> + rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_count, 3} %% Replaced with a list of node names later. + ]). + +end_per_group(_, Config) -> + Config. 
+ +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + ClusterSize = ?config(rmq_nodes_count, Config), + Nodenames = [ + list_to_atom(rabbit_misc:format("~s-~b", [Testcase, I])) + || I <- lists:seq(1, ClusterSize) + ], + TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_count, Nodenames}, + {rmq_nodes_clustered, true}, + {rmq_nodename_suffix, Testcase}, + {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. +%% ------------------------------------------------------------------- + +%% +%% Queue 'declarations' +%% + +declare_args(Config) -> + setup_test_environment(Config), + unset_location_config(Config), + QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>), + Args = [{<<"x-queue-master-locator">>, <<"min-masters">>}], + declare(Config, QueueName, false, false, Args, none), + verify_min_master(Config, Q). + +declare_policy(Config) -> + setup_test_environment(Config), + unset_location_config(Config), + set_location_policy(Config, ?POLICY, <<"min-masters">>), + QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>), + declare(Config, QueueName, false, false, _Args=[], none), + verify_min_master(Config, Q). 
+ +declare_config(Config) -> + setup_test_environment(Config), + set_location_config(Config, <<"min-masters">>), + QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>), + declare(Config, QueueName, false, false, _Args=[], none), + verify_min_master(Config, Q), + unset_location_config(Config), + ok. + +%% +%% Test 'calculations' +%% + +calculate_min_master(Config) -> + setup_test_environment(Config), + QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>), + Args = [{<<"x-queue-master-locator">>, <<"min-masters">>}], + declare(Config, QueueName, false, false, Args, none), + verify_min_master(Config, Q), + ok. + +calculate_random(Config) -> + setup_test_environment(Config), + QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>), + Args = [{<<"x-queue-master-locator">>, <<"random">>}], + declare(Config, QueueName, false, false, Args, none), + verify_random(Config, Q), + ok. + +calculate_client_local(Config) -> + setup_test_environment(Config), + QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>), + Args = [{<<"x-queue-master-locator">>, <<"client-local">>}], + declare(Config, QueueName, false, false, Args, none), + verify_client_local(Config, Q), + ok. + +%% +%% Setup environment +%% + +setup_test_environment(Config) -> + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + [distribute_queues(Config, Node) || Node <- Nodes], + ok. + +distribute_queues(Config, Node) -> + ok = rpc:call(Node, application, unset_env, [rabbit, queue_master_location]), + Count = case rabbit_ct_broker_helpers:nodename_to_index(Config, Node) of + 0 -> 15; + 1 -> 8; + 2 -> 1 + end, + + Channel = rabbit_ct_client_helpers:open_channel(Config, Node), + ok = declare_queues(Channel, declare_fun(), Count), + ok = create_e2e_binding(Channel, [<< "ex_1" >>, << "ex_2" >>]), + {ok, Channel}. 
+ +%% +%% Internal queue handling +%% + +declare_queues(Channel, DeclareFun, 1) -> DeclareFun(Channel); +declare_queues(Channel, DeclareFun, N) -> + DeclareFun(Channel), + declare_queues(Channel, DeclareFun, N-1). + +declare_exchange(Channel, Ex) -> + #'exchange.declare_ok'{} = + amqp_channel:call(Channel, #'exchange.declare'{exchange = Ex}), + {ok, Ex}. + +declare_binding(Channel, Binding) -> + #'exchange.bind_ok'{} = amqp_channel:call(Channel, Binding), + ok. + +declare_fun() -> + fun(Channel) -> + #'queue.declare_ok'{} = amqp_channel:call(Channel, get_random_queue_declare()), + ok + end. + +create_e2e_binding(Channel, ExNamesBin) -> + [{ok, Ex1}, {ok, Ex2}] = [declare_exchange(Channel, Ex) || Ex <- ExNamesBin], + Binding = #'exchange.bind'{source = Ex1, destination = Ex2}, + ok = declare_binding(Channel, Binding). + +get_random_queue_declare() -> + #'queue.declare'{passive = false, + durable = false, + exclusive = true, + auto_delete = false, + nowait = false, + arguments = []}. + +%% +%% Internal helper functions +%% + +get_cluster() -> [node()|nodes()]. + +min_master_node(Config) -> + hd(lists:reverse( + rabbit_ct_broker_helpers:get_node_configs(Config, nodename))). + +set_location_config(Config, Strategy) -> + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + [ok = rpc:call(Node, application, set_env, + [rabbit, queue_master_locator, Strategy]) || Node <- Nodes], + ok. + +unset_location_config(Config) -> + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + [ok = rpc:call(Node, application, unset_env, + [rabbit, queue_master_locator]) || Node <- Nodes], + ok. + +declare(Config, QueueName, Durable, AutoDelete, Args, Owner) -> + Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + {new, Queue} = rpc:call(Node, rabbit_amqqueue, declare, + [QueueName, Durable, AutoDelete, Args, Owner]), + Queue. 
+ +verify_min_master(Config, Q) -> + Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + MinMaster = min_master_node(Config), + {ok, MinMaster} = rpc:call(Node, rabbit_queue_master_location_misc, + lookup_master, [Q, ?DEFAULT_VHOST_PATH]). + +verify_random(Config, Q) -> + [Node | _] = Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, + nodename), + {ok, Master} = rpc:call(Node, rabbit_queue_master_location_misc, + lookup_master, [Q, ?DEFAULT_VHOST_PATH]), + true = lists:member(Master, Nodes). + +verify_client_local(Config, Q) -> + Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + {ok, Node} = rpc:call(Node, rabbit_queue_master_location_misc, + lookup_master, [Q, ?DEFAULT_VHOST_PATH]). + +set_location_policy(Config, Name, Strategy) -> + ok = rabbit_ct_broker_helpers:set_policy(Config, 0, + Name, <<".*">>, <<"queues">>, [{<<"queue-master-locator">>, Strategy}]). diff --git a/test/rabbit_ha_test_consumer.erl b/test/rabbit_ha_test_consumer.erl new file mode 100644 index 000000000000..f374863f6ad3 --- /dev/null +++ b/test/rabbit_ha_test_consumer.erl @@ -0,0 +1,114 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. +%% +-module(rabbit_ha_test_consumer). + +-include_lib("amqp_client/include/amqp_client.hrl"). + +-export([await_response/1, create/5, start/6]). 
+ +await_response(ConsumerPid) -> + case receive {ConsumerPid, Response} -> Response end of + {error, Reason} -> erlang:error(Reason); + ok -> ok + end. + +create(Channel, Queue, TestPid, CancelOnFailover, ExpectingMsgs) -> + ConsumerPid = spawn_link(?MODULE, start, + [TestPid, Channel, Queue, CancelOnFailover, + ExpectingMsgs + 1, ExpectingMsgs]), + amqp_channel:subscribe( + Channel, consume_method(Queue, CancelOnFailover), ConsumerPid), + ConsumerPid. + +start(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume) -> + error_logger:info_msg("consumer ~p on ~p awaiting ~w messages " + "(lowest seen = ~w, cancel-on-failover = ~w)~n", + [self(), Channel, MsgsToConsume, LowestSeen, + CancelOnFailover]), + run(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume). + +run(TestPid, _Channel, _Queue, _CancelOnFailover, _LowestSeen, 0) -> + consumer_reply(TestPid, ok); +run(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume) -> + receive + #'basic.consume_ok'{} -> + run(TestPid, Channel, Queue, + CancelOnFailover, LowestSeen, MsgsToConsume); + {Delivery = #'basic.deliver'{ redelivered = Redelivered }, + #amqp_msg{payload = Payload}} -> + MsgNum = list_to_integer(binary_to_list(Payload)), + + ack(Delivery, Channel), + + %% we can receive any message we've already seen and, + %% because of the possibility of multiple requeuings, we + %% might see these messages in any order. If we are seeing + %% a message again, we don't decrement the MsgsToConsume + %% counter. 
+ if + MsgNum + 1 == LowestSeen -> + run(TestPid, Channel, Queue, + CancelOnFailover, MsgNum, MsgsToConsume - 1); + MsgNum >= LowestSeen -> + error_logger:info_msg( + "consumer ~p on ~p ignoring redeliverd msg ~p~n", + [self(), Channel, MsgNum]), + true = Redelivered, %% ASSERTION + run(TestPid, Channel, Queue, + CancelOnFailover, LowestSeen, MsgsToConsume); + true -> + %% We received a message we haven't seen before, + %% but it is not the next message in the expected + %% sequence. + consumer_reply(TestPid, + {error, {unexpected_message, MsgNum}}) + end; + #'basic.cancel'{} when CancelOnFailover -> + error_logger:info_msg("consumer ~p on ~p received basic.cancel: " + "resubscribing to ~p on ~p~n", + [self(), Channel, Queue, Channel]), + resubscribe(TestPid, Channel, Queue, CancelOnFailover, + LowestSeen, MsgsToConsume); + #'basic.cancel'{} -> + exit(cancel_received_without_cancel_on_failover) + end. + +%% +%% Private API +%% + +resubscribe(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, + MsgsToConsume) -> + amqp_channel:subscribe( + Channel, consume_method(Queue, CancelOnFailover), self()), + ok = receive #'basic.consume_ok'{} -> ok + end, + error_logger:info_msg("re-subscripting consumer ~p on ~p complete " + "(received basic.consume_ok)", + [self(), Channel]), + start(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume). + +consume_method(Queue, CancelOnFailover) -> + Args = [{<<"x-cancel-on-ha-failover">>, bool, CancelOnFailover}], + #'basic.consume'{queue = Queue, + arguments = Args}. + +ack(#'basic.deliver'{delivery_tag = DeliveryTag}, Channel) -> + amqp_channel:call(Channel, #'basic.ack'{delivery_tag = DeliveryTag}), + ok. + +consumer_reply(TestPid, Reply) -> + TestPid ! {self(), Reply}. 
diff --git a/test/rabbit_ha_test_producer.erl b/test/rabbit_ha_test_producer.erl new file mode 100644 index 000000000000..66dee3f7a3cd --- /dev/null +++ b/test/rabbit_ha_test_producer.erl @@ -0,0 +1,119 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. +%% +-module(rabbit_ha_test_producer). + +-export([await_response/1, start/5, create/5]). + +-include_lib("amqp_client/include/amqp_client.hrl"). + +await_response(ProducerPid) -> + error_logger:info_msg("waiting for producer pid ~p~n", [ProducerPid]), + case receive {ProducerPid, Response} -> Response end of + ok -> ok; + {error, _} = Else -> exit(Else); + Else -> exit({weird_response, Else}) + end. + +create(Channel, Queue, TestPid, Confirm, MsgsToSend) -> + ProducerPid = spawn_link(?MODULE, start, [Channel, Queue, TestPid, + Confirm, MsgsToSend]), + receive + {ProducerPid, started} -> ProducerPid + end. + +start(Channel, Queue, TestPid, Confirm, MsgsToSend) -> + ConfirmState = + case Confirm of + true -> amqp_channel:register_confirm_handler(Channel, self()), + #'confirm.select_ok'{} = + amqp_channel:call(Channel, #'confirm.select'{}), + gb_trees:empty(); + false -> none + end, + TestPid ! {self(), started}, + error_logger:info_msg("publishing ~w msgs on ~p~n", [MsgsToSend, Channel]), + producer(Channel, Queue, TestPid, ConfirmState, MsgsToSend). 
+ +%% +%% Private API +%% + +producer(_Channel, _Queue, TestPid, none, 0) -> + TestPid ! {self(), ok}; +producer(Channel, _Queue, TestPid, ConfirmState, 0) -> + error_logger:info_msg("awaiting confirms on channel ~p~n", [Channel]), + Msg = case drain_confirms(no_nacks, ConfirmState) of + no_nacks -> ok; + nacks -> {error, received_nacks}; + {Nacks, CS} -> {error, {missing_confirms, Nacks, + lists:sort(gb_trees:keys(CS))}} + end, + TestPid ! {self(), Msg}; + +producer(Channel, Queue, TestPid, ConfirmState, MsgsToSend) -> + Method = #'basic.publish'{exchange = <<"">>, + routing_key = Queue, + mandatory = false, + immediate = false}, + + ConfirmState1 = maybe_record_confirm(ConfirmState, Channel, MsgsToSend), + + amqp_channel:call(Channel, Method, + #amqp_msg{props = #'P_basic'{delivery_mode = 2}, + payload = list_to_binary( + integer_to_list(MsgsToSend))}), + + producer(Channel, Queue, TestPid, ConfirmState1, MsgsToSend - 1). + +maybe_record_confirm(none, _, _) -> + none; +maybe_record_confirm(ConfirmState, Channel, MsgsToSend) -> + SeqNo = amqp_channel:next_publish_seqno(Channel), + gb_trees:insert(SeqNo, MsgsToSend, ConfirmState). + +drain_confirms(Nacks, ConfirmState) -> + case gb_trees:is_empty(ConfirmState) of + true -> Nacks; + false -> receive + #'basic.ack'{delivery_tag = DeliveryTag, + multiple = IsMulti} -> + drain_confirms(Nacks, + delete_confirms(DeliveryTag, IsMulti, + ConfirmState)); + #'basic.nack'{delivery_tag = DeliveryTag, + multiple = IsMulti} -> + drain_confirms(nacks, + delete_confirms(DeliveryTag, IsMulti, + ConfirmState)) + after + 60000 -> {Nacks, ConfirmState} + end + end. + +delete_confirms(DeliveryTag, false, ConfirmState) -> + gb_trees:delete(DeliveryTag, ConfirmState); +delete_confirms(DeliveryTag, true, ConfirmState) -> + multi_confirm(DeliveryTag, ConfirmState). 
+ +multi_confirm(DeliveryTag, ConfirmState) -> + case gb_trees:is_empty(ConfirmState) of + true -> ConfirmState; + false -> {Key, _, ConfirmState1} = gb_trees:take_smallest(ConfirmState), + case Key =< DeliveryTag of + true -> multi_confirm(DeliveryTag, ConfirmState1); + false -> ConfirmState + end + end. diff --git a/test/simple_ha_SUITE.erl b/test/simple_ha_SUITE.erl new file mode 100644 index 000000000000..af85ad6d3b15 --- /dev/null +++ b/test/simple_ha_SUITE.erl @@ -0,0 +1,216 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(simple_ha_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile(export_all). + +all() -> + [ + {group, cluster_size_2}, + {group, cluster_size_3} + ]. + +groups() -> + [ + {cluster_size_2, [], [ + rapid_redeclare, + declare_synchrony + ]}, + {cluster_size_3, [], [ + consume_survives_stop, + consume_survives_sigkill, + consume_survives_policy, + auto_resume, + auto_resume_no_ccn_client, + confirms_survive_stop, + confirms_survive_sigkill, + confirms_survive_policy + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. 
+%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(cluster_size_2, Config) -> + rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_count, 2} + ]); +init_per_group(cluster_size_3, Config) -> + rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_count, 3} + ]). + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + ClusterSize = ?config(rmq_nodes_count, Config), + TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_clustered, true}, + {rmq_nodename_suffix, Testcase}, + {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps() ++ [ + fun rabbit_ct_broker_helpers:set_ha_policy_all/1 + ]). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. +%% ------------------------------------------------------------------- + +rapid_redeclare(Config) -> + A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, A), + Queue = <<"test">>, + [begin + amqp_channel:call(Ch, #'queue.declare'{queue = Queue, + durable = true}), + amqp_channel:call(Ch, #'queue.delete'{queue = Queue}) + end || _I <- lists:seq(1, 20)], + ok. + +%% Check that by the time we get a declare-ok back, the slaves are up +%% and in Mnesia. 
+declare_synchrony(Config) -> + [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config, + nodename), + RabbitCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit), + HareCh = rabbit_ct_client_helpers:open_channel(Config, Hare), + Q = <<"mirrored-queue">>, + declare(RabbitCh, Q), + amqp_channel:call(RabbitCh, #'confirm.select'{}), + amqp_channel:cast(RabbitCh, #'basic.publish'{routing_key = Q}, + #amqp_msg{props = #'P_basic'{delivery_mode = 2}}), + amqp_channel:wait_for_confirms(RabbitCh), + rabbit_ct_broker_helpers:kill_node(Config, Rabbit), + + #'queue.declare_ok'{message_count = 1} = declare(HareCh, Q), + ok. + +declare(Ch, Name) -> + amqp_channel:call(Ch, #'queue.declare'{durable = true, queue = Name}). + +consume_survives_stop(Cf) -> consume_survives(Cf, fun stop/2, true). +consume_survives_sigkill(Cf) -> consume_survives(Cf, fun sigkill/2, true). +consume_survives_policy(Cf) -> consume_survives(Cf, fun policy/2, true). +auto_resume(Cf) -> consume_survives(Cf, fun sigkill/2, false). +auto_resume_no_ccn_client(Cf) -> consume_survives(Cf, fun sigkill/2, false, + false). + +confirms_survive_stop(Cf) -> confirms_survive(Cf, fun stop/2). +confirms_survive_sigkill(Cf) -> confirms_survive(Cf, fun sigkill/2). +confirms_survive_policy(Cf) -> confirms_survive(Cf, fun policy/2). + +%%---------------------------------------------------------------------------- + +consume_survives(Config, DeathFun, CancelOnFailover) -> + consume_survives(Config, DeathFun, CancelOnFailover, true). 
+ +consume_survives(Config, + DeathFun, CancelOnFailover, CCNSupported) -> + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000), + Channel1 = rabbit_ct_client_helpers:open_channel(Config, A), + Channel2 = rabbit_ct_client_helpers:open_channel(Config, B), + Channel3 = rabbit_ct_client_helpers:open_channel(Config, C), + + %% declare the queue on the master, mirrored to the two slaves + Queue = <<"test">>, + amqp_channel:call(Channel1, #'queue.declare'{queue = Queue, + auto_delete = false}), + + %% start up a consumer + ConsCh = case CCNSupported of + true -> Channel2; + false -> Port = rabbit_ct_broker_helpers:get_node_config( + Config, B, tcp_port_amqp), + open_incapable_channel(Port) + end, + ConsumerPid = rabbit_ha_test_consumer:create( + ConsCh, Queue, self(), CancelOnFailover, Msgs), + + %% send a bunch of messages from the producer + ProducerPid = rabbit_ha_test_producer:create(Channel3, Queue, + self(), false, Msgs), + DeathFun(Config, A), + %% verify that the consumer got all msgs, or die - the await_response + %% calls throw an exception if anything goes wrong.... + rabbit_ha_test_consumer:await_response(ConsumerPid), + rabbit_ha_test_producer:await_response(ProducerPid), + ok. 
+ +confirms_survive(Config, DeathFun) -> + [A, B, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000), + Node1Channel = rabbit_ct_client_helpers:open_channel(Config, A), + Node2Channel = rabbit_ct_client_helpers:open_channel(Config, B), + + %% declare the queue on the master, mirrored to the two slaves + Queue = <<"test">>, + amqp_channel:call(Node1Channel,#'queue.declare'{queue = Queue, + auto_delete = false, + durable = true}), + + %% send a bunch of messages from the producer + ProducerPid = rabbit_ha_test_producer:create(Node2Channel, Queue, + self(), true, Msgs), + DeathFun(Config, A), + rabbit_ha_test_producer:await_response(ProducerPid), + ok. + +stop(Config, Node) -> + rabbit_ct_broker_helpers:stop_node_after(Config, Node, 50). + +sigkill(Config, Node) -> + rabbit_ct_broker_helpers:kill_node_after(Config, Node, 50). + +policy(Config, Node)-> + Nodes = [ + rabbit_misc:atom_to_binary(N) + || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + N =/= Node], + rabbit_ct_broker_helpers:set_ha_policy(Config, Node, <<".*">>, + {<<"nodes">>, Nodes}). + +open_incapable_channel(NodePort) -> + Props = [{<<"capabilities">>, table, []}], + {ok, ConsConn} = + amqp_connection:start(#amqp_params_network{port = NodePort, + client_properties = Props}), + {ok, Ch} = amqp_connection:open_channel(ConsConn), + Ch. diff --git a/test/sync_detection_SUITE.erl b/test/sync_detection_SUITE.erl new file mode 100644 index 000000000000..1e0a66e8fdd7 --- /dev/null +++ b/test/sync_detection_SUITE.erl @@ -0,0 +1,252 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. 
See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(sync_detection_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile(export_all). + +-define(LOOP_RECURSION_DELAY, 100). + +all() -> + [ + {group, cluster_size_2}, + {group, cluster_size_3} + ]. + +groups() -> + [ + {cluster_size_2, [], [ + slave_synchronization + ]}, + {cluster_size_3, [], [ + slave_synchronization_ttl + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(cluster_size_2, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]); +init_per_group(cluster_size_3, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]). + +end_per_group(_, Config) -> + Config. 
+ +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + ClusterSize = ?config(rmq_nodes_count, Config), + TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_count, ClusterSize}, + {rmq_nodes_clustered, true}, + {rmq_nodename_suffix, Testcase}, + {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps() ++ [ + fun rabbit_ct_broker_helpers:set_ha_policy_two_pos/1, + fun rabbit_ct_broker_helpers:set_ha_policy_two_pos_batch_sync/1 + ]). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. +%% ------------------------------------------------------------------- + +slave_synchronization(Config) -> + [Master, Slave] = rabbit_ct_broker_helpers:get_node_configs(Config, + nodename), + Channel = rabbit_ct_client_helpers:open_channel(Config, Master), + Queue = <<"ha.two.test">>, + #'queue.declare_ok'{} = + amqp_channel:call(Channel, #'queue.declare'{queue = Queue, + auto_delete = false}), + + %% The comments on the right are the queue length and the pending acks on + %% the master. + rabbit_ct_broker_helpers:stop_broker(Config, Slave), + + %% We get and ack one message when the slave is down, and check that when we + %% start the slave it's not marked as synced until ack the message. We also + %% publish another message when the slave is up. 
+ send_dummy_message(Channel, Queue), % 1 - 0 + {#'basic.get_ok'{delivery_tag = Tag1}, _} = + amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 0 - 1 + + rabbit_ct_broker_helpers:start_broker(Config, Slave), + + slave_unsynced(Master, Queue), + send_dummy_message(Channel, Queue), % 1 - 1 + slave_unsynced(Master, Queue), + + amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag1}), % 1 - 0 + + slave_synced(Master, Queue), + + %% We restart the slave and we send a message, so that the slave will only + %% have one of the messages. + rabbit_ct_broker_helpers:stop_broker(Config, Slave), + rabbit_ct_broker_helpers:start_broker(Config, Slave), + + send_dummy_message(Channel, Queue), % 2 - 0 + + slave_unsynced(Master, Queue), + + %% We reject the message that the slave doesn't have, and verify that it's + %% still unsynced + {#'basic.get_ok'{delivery_tag = Tag2}, _} = + amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 1 - 1 + slave_unsynced(Master, Queue), + amqp_channel:cast(Channel, #'basic.reject'{ delivery_tag = Tag2, + requeue = true }), % 2 - 0 + slave_unsynced(Master, Queue), + {#'basic.get_ok'{delivery_tag = Tag3}, _} = + amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 1 - 1 + amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag3}), % 1 - 0 + slave_synced(Master, Queue), + {#'basic.get_ok'{delivery_tag = Tag4}, _} = + amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 0 - 1 + amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag4}), % 0 - 0 + slave_synced(Master, Queue). 
+ +slave_synchronization_ttl(Config) -> + [Master, Slave, DLX] = rabbit_ct_broker_helpers:get_node_configs(Config, + nodename), + Channel = rabbit_ct_client_helpers:open_channel(Config, Master), + DLXChannel = rabbit_ct_client_helpers:open_channel(Config, DLX), + + %% We declare a DLX queue to wait for messages to be TTL'ed + DLXQueue = <<"dlx-queue">>, + #'queue.declare_ok'{} = + amqp_channel:call(Channel, #'queue.declare'{queue = DLXQueue, + auto_delete = false}), + + TestMsgTTL = 5000, + Queue = <<"ha.two.test">>, + %% Sadly we need fairly high numbers for the TTL because starting/stopping + %% nodes takes a fair amount of time. + Args = [{<<"x-message-ttl">>, long, TestMsgTTL}, + {<<"x-dead-letter-exchange">>, longstr, <<>>}, + {<<"x-dead-letter-routing-key">>, longstr, DLXQueue}], + #'queue.declare_ok'{} = + amqp_channel:call(Channel, #'queue.declare'{queue = Queue, + auto_delete = false, + arguments = Args}), + + slave_synced(Master, Queue), + + %% All unknown + rabbit_ct_broker_helpers:stop_broker(Config, Slave), + send_dummy_message(Channel, Queue), + send_dummy_message(Channel, Queue), + rabbit_ct_broker_helpers:start_broker(Config, Slave), + slave_unsynced(Master, Queue), + wait_for_messages(DLXQueue, DLXChannel, 2), + slave_synced(Master, Queue), + + %% 1 unknown, 1 known + rabbit_ct_broker_helpers:stop_broker(Config, Slave), + send_dummy_message(Channel, Queue), + rabbit_ct_broker_helpers:start_broker(Config, Slave), + slave_unsynced(Master, Queue), + send_dummy_message(Channel, Queue), + slave_unsynced(Master, Queue), + wait_for_messages(DLXQueue, DLXChannel, 2), + slave_synced(Master, Queue), + + %% %% both known + send_dummy_message(Channel, Queue), + send_dummy_message(Channel, Queue), + slave_synced(Master, Queue), + wait_for_messages(DLXQueue, DLXChannel, 2), + slave_synced(Master, Queue), + + ok. 
+
+send_dummy_message(Channel, Queue) ->
+    Payload = <<"foo">>,
+    Publish = #'basic.publish'{exchange = <<>>, routing_key = Queue},
+    amqp_channel:cast(Channel, Publish, #amqp_msg{payload = Payload}).
+
+slave_pids(Node, Queue) ->
+    {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup,
+                       [rabbit_misc:r(<<"/">>, queue, Queue)]),
+    SSP = synchronised_slave_pids,
+    [{SSP, Pids}] = rpc:call(Node, rabbit_amqqueue, info, [Q, [SSP]]),
+    case Pids of
+        '' -> [];
+        _  -> Pids
+    end.
+
+%% The mnesia synchronization takes a while, but we don't want to wait for the
+%% test to fail, since the timetrap is quite high.
+wait_for_sync_status(Status, Node, Queue) ->
+    Max = 10000 / ?LOOP_RECURSION_DELAY,
+    wait_for_sync_status(0, Max, Status, Node, Queue).
+
+wait_for_sync_status(N, Max, Status, Node, Queue) when N >= Max ->
+    erlang:error({sync_status_max_tries_failed,
+                  [{queue, Queue},
+                   {node, Node},
+                   {expected_status, Status},
+                   {max_tried, Max}]});
+wait_for_sync_status(N, Max, Status, Node, Queue) ->
+    Synced = length(slave_pids(Node, Queue)) =:= 1,
+    case Synced =:= Status of
+        true  -> ok;
+        false -> timer:sleep(?LOOP_RECURSION_DELAY),
+                 wait_for_sync_status(N + 1, Max, Status, Node, Queue)
+    end.
+
+slave_synced(Node, Queue) ->
+    wait_for_sync_status(true, Node, Queue).
+
+slave_unsynced(Node, Queue) ->
+    wait_for_sync_status(false, Node, Queue).
+
+wait_for_messages(Queue, Channel, N) ->
+    Sub = #'basic.consume'{queue = Queue},
+    #'basic.consume_ok'{consumer_tag = CTag} = amqp_channel:call(Channel, Sub),
+    receive
+        #'basic.consume_ok'{} -> ok
+    end,
+    lists:foreach(
+      fun (_) -> receive
+                     {#'basic.deliver'{delivery_tag = Tag}, _Content} ->
+                         amqp_channel:cast(Channel,
+                                           #'basic.ack'{delivery_tag = Tag})
+                 end
+      end, lists:seq(1, N)),
+    amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = CTag}).
From c8bdf334f626e6b5de7837234be8c541396fe2d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Fri, 27 May 2016 10:41:32 +0200 Subject: [PATCH 117/174] Switch testsuite to common_test, part #4 This is a follow-up to the previous merge of the stable branch. It covers tests which were modified or added only to the master branch. References #725. [#116526487] --- test/channel_interceptor_SUITE.erl | 113 +++ test/channel_operation_timeout_test_queue.erl | 6 +- test/config_schema_SUITE.erl | 153 ++++ .../config_schema_SUITE_data/certs/cacert.pem | 1 + test/config_schema_SUITE_data/certs/cert.pem | 1 + test/config_schema_SUITE_data/certs/key.pem | 1 + .../rabbit-mgmt/access.log | 0 test/config_schema_SUITE_data/snippets.config | 714 ++++++++++++++++++ test/dummy_interceptor.erl | 26 + test/plugin_versioning_SUITE.erl | 177 +++++ test/unit_SUITE.erl | 3 +- test/unit_inbroker_SUITE.erl | 272 +++---- 12 files changed, 1316 insertions(+), 151 deletions(-) create mode 100644 test/channel_interceptor_SUITE.erl create mode 100644 test/config_schema_SUITE.erl create mode 100644 test/config_schema_SUITE_data/certs/cacert.pem create mode 100644 test/config_schema_SUITE_data/certs/cert.pem create mode 100644 test/config_schema_SUITE_data/certs/key.pem create mode 100644 test/config_schema_SUITE_data/rabbit-mgmt/access.log create mode 100644 test/config_schema_SUITE_data/snippets.config create mode 100644 test/dummy_interceptor.erl create mode 100644 test/plugin_versioning_SUITE.erl diff --git a/test/channel_interceptor_SUITE.erl b/test/channel_interceptor_SUITE.erl new file mode 100644 index 000000000000..0e4948ea3c4f --- /dev/null +++ b/test/channel_interceptor_SUITE.erl @@ -0,0 +1,113 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. 
You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(channel_interceptor_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile(export_all). + +all() -> + [ + {group, non_parallel_tests} + ]. + +groups() -> + [ + {non_parallel_tests, [], [ + register_interceptor + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, Testcase} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. 
+%% ------------------------------------------------------------------- + +register_interceptor(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, register_interceptor1, [Config]). + +register_interceptor1(Config) -> + PredefinedChannels = rabbit_channel:list(), + + Ch1 = rabbit_ct_client_helpers:open_channel(Config, 0), + + QName = <<"register_interceptor-q">>, + amqp_channel:call(Ch1, #'queue.declare'{queue = QName}), + + [ChannelProc] = rabbit_channel:list() -- PredefinedChannels, + + [{interceptors, []}] = rabbit_channel:info(ChannelProc, [interceptors]), + + check_send_receive(Ch1, QName, <<"bar">>, <<"bar">>), + + ok = rabbit_registry:register(channel_interceptor, + <<"dummy interceptor">>, + dummy_interceptor), + [{interceptors, [{dummy_interceptor, undefined}]}] = + rabbit_channel:info(ChannelProc, [interceptors]), + + check_send_receive(Ch1, QName, <<"bar">>, <<"">>), + + ok = rabbit_registry:unregister(channel_interceptor, + <<"dummy interceptor">>), + [{interceptors, []}] = rabbit_channel:info(ChannelProc, [interceptors]), + + check_send_receive(Ch1, QName, <<"bar">>, <<"bar">>), + passed. + + +check_send_receive(Ch1, QName, Send, Receive) -> + amqp_channel:call(Ch1, + #'basic.publish'{routing_key = QName}, + #amqp_msg{payload = Send}), + + {#'basic.get_ok'{}, #amqp_msg{payload = Receive}} = + amqp_channel:call(Ch1, #'basic.get'{queue = QName, + no_ack = true}). 
diff --git a/test/channel_operation_timeout_test_queue.erl b/test/channel_operation_timeout_test_queue.erl index 55cd5f42fa55..87c33bea8779 100644 --- a/test/channel_operation_timeout_test_queue.erl +++ b/test/channel_operation_timeout_test_queue.erl @@ -563,7 +563,7 @@ update_rates(State = #vqstate{ in_counter = InCount, ack_in = AckInRate, ack_out = AckOutRate, timestamp = TS }}) -> - Now = time_compat:monotonic_time(), + Now = erlang:monotonic_time(), Rates = #rates { in = update_rate(Now, TS, InCount, InRate), out = update_rate(Now, TS, OutCount, OutRate), @@ -578,7 +578,7 @@ update_rates(State = #vqstate{ in_counter = InCount, rates = Rates }. update_rate(Now, TS, Count, Rate) -> - Time = time_compat:convert_time_unit(Now - TS, native, micro_seconds) / + Time = erlang:convert_time_unit(Now - TS, native, micro_seconds) / ?MICROS_PER_SECOND, if Time == 0 -> Rate; @@ -1076,7 +1076,7 @@ init(IsDurable, IndexState, DeltaCount, DeltaBytes, Terms, count = DeltaCount1, end_seq_id = NextSeqId }) end, - Now = time_compat:monotonic_time(), + Now = erlang:monotonic_time(), IoBatchSize = rabbit_misc:get_env(rabbit, msg_store_io_batch_size, ?IO_BATCH_SIZE), diff --git a/test/config_schema_SUITE.erl b/test/config_schema_SUITE.erl new file mode 100644 index 000000000000..b1362aac287f --- /dev/null +++ b/test/config_schema_SUITE.erl @@ -0,0 +1,153 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. 
+%% Copyright (c) 2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(config_schema_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile(export_all). + +all() -> + [ + {group, non_parallel_tests} + ]. + +groups() -> + [ + {non_parallel_tests, [], [ + run_snippets + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, Testcase} + ]), + Config2 = case Testcase of + run_snippets -> + SchemaDir = filename:join(?config(priv_dir, Config1), "schema"), + ResultsDir = filename:join(?config(priv_dir, Config1), "results"), + Snippets = filename:join(?config(data_dir, Config1), + "snippets.config"), + ok = file:make_dir(SchemaDir), + ok = file:make_dir(ResultsDir), + rabbit_ct_helpers:set_config(Config1, [ + {schema_dir, SchemaDir}, + {results_dir, ResultsDir}, + {conf_snippets, Snippets} + ]) + end, + rabbit_ct_helpers:run_steps(Config2, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. 
+%% ------------------------------------------------------------------- + +run_snippets(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, run_snippets1, [Config]). + +run_snippets1(Config) -> + prepare_plugin_schemas(Config), + {ok, [Snippets]} = file:consult(?config(conf_snippets, Config)), + lists:map( + fun({N, S, C, P}) -> ok = test_snippet(Config, {integer_to_list(N), S, []}, C, P); + ({N, S, A, C, P}) -> ok = test_snippet(Config, {integer_to_list(N), S, A}, C, P) + end, + Snippets), + passed. + +test_snippet(Config, Snippet, Expected, _Plugins) -> + {ConfFile, AdvancedFile} = write_snippet(Config, Snippet), + {ok, GeneratedFile} = generate_config(Config, ConfFile, AdvancedFile), + {ok, [Generated]} = file:consult(GeneratedFile), + Gen = deepsort(Generated), + Exp = deepsort(Expected), + case Exp of + Gen -> ok; + _ -> + error({config_mismatch, Snippet, Exp, Gen}) + end. + +write_snippet(Config, {Name, Conf, Advanced}) -> + ResultsDir = ?config(results_dir, Config), + file:make_dir(filename:join(ResultsDir, Name)), + ConfFile = filename:join([ResultsDir, Name, "config.conf"]), + AdvancedFile = filename:join([ResultsDir, Name, "advanced.config"]), + + file:write_file(ConfFile, Conf), + rabbit_file:write_term_file(AdvancedFile, [Advanced]), + {ConfFile, AdvancedFile}. + +generate_config(Config, ConfFile, AdvancedFile) -> + SchemaDir = ?config(schema_dir, Config), + ResultsDir = ?config(results_dir, Config), + Rabbitmqctl = ?config(rabbitmqctl_cmd, Config), + ScriptDir = filename:dirname(Rabbitmqctl), + ct:pal("ConfFile=~p ScriptDir=~p SchemaDir=~p AdvancedFile=~p", [ConfFile, ScriptDir, SchemaDir, AdvancedFile]), + rabbit_config:generate_config_file([ConfFile], ResultsDir, ScriptDir, + SchemaDir, AdvancedFile). 
+ +prepare_plugin_schemas(Config) -> + SchemaDir = ?config(schema_dir, Config), + DepsDir = ?config(erlang_mk_depsdir, Config), + Files = filelib:wildcard( + filename:join(DepsDir, "*/priv/schema/*.schema")), + [ file:copy(File, filename:join([SchemaDir, filename:basename(File)])) + || File <- Files ]. + +deepsort(List) -> + case is_proplist(List) of + true -> + lists:keysort(1, lists:map(fun({K, V}) -> {K, deepsort(V)}; + (V) -> V end, + List)); + false -> + case is_list(List) of + true -> lists:sort(List); + false -> List + end + end. + +is_proplist([{_Key, _Val}|_] = List) -> lists:all(fun({_K, _V}) -> true; (_) -> false end, List); +is_proplist(_) -> false. diff --git a/test/config_schema_SUITE_data/certs/cacert.pem b/test/config_schema_SUITE_data/certs/cacert.pem new file mode 100644 index 000000000000..eaf6b67806ce --- /dev/null +++ b/test/config_schema_SUITE_data/certs/cacert.pem @@ -0,0 +1 @@ +I'm not a certificate diff --git a/test/config_schema_SUITE_data/certs/cert.pem b/test/config_schema_SUITE_data/certs/cert.pem new file mode 100644 index 000000000000..eaf6b67806ce --- /dev/null +++ b/test/config_schema_SUITE_data/certs/cert.pem @@ -0,0 +1 @@ +I'm not a certificate diff --git a/test/config_schema_SUITE_data/certs/key.pem b/test/config_schema_SUITE_data/certs/key.pem new file mode 100644 index 000000000000..eaf6b67806ce --- /dev/null +++ b/test/config_schema_SUITE_data/certs/key.pem @@ -0,0 +1 @@ +I'm not a certificate diff --git a/test/config_schema_SUITE_data/rabbit-mgmt/access.log b/test/config_schema_SUITE_data/rabbit-mgmt/access.log new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/test/config_schema_SUITE_data/snippets.config b/test/config_schema_SUITE_data/snippets.config new file mode 100644 index 000000000000..22c9f2b7cdc8 --- /dev/null +++ b/test/config_schema_SUITE_data/snippets.config @@ -0,0 +1,714 @@ +[ +{1, +"auth_backends.1 = internal", +[{rabbit, [{auth_backends, [rabbit_auth_backend_internal]}]}],[]} +, +{2, 
+"auth_backends.1 = ldap", +[{rabbit, [{auth_backends, [rabbit_auth_backend_ldap]}]}],[]} +, + +{3, +"auth_backends.1 = ldap +auth_backends.2 = internal", + +[{rabbit, [ + {auth_backends, [rabbit_auth_backend_ldap, rabbit_auth_backend_internal]} + ] + }],[]} + +, + +{4, +"auth_backends.1 = ldap +# uses module name instead of a short alias, \"http\" +auth_backends.2 = rabbit_auth_backend_http", + +[{rabbit, [{auth_backends, [rabbit_auth_backend_ldap, rabbit_auth_backend_http]}]}],[]} + +, + +{5, +"auth_backends.1.authn = internal +# uses module name because this backend is from a 3rd party +auth_backends.1.authz = rabbit_auth_backend_ip_range", +[{rabbit, [{auth_backends, [{rabbit_auth_backend_internal, rabbit_auth_backend_ip_range}]}]}],[]} +, +{6, +"auth_backends.1.authn = ldap +auth_backends.1.authz = internal", +[{rabbit, [{auth_backends, [{rabbit_auth_backend_ldap, rabbit_auth_backend_internal}]}]}],[]} +, + +{7, +"auth_backends.1.authn = ldap +auth_backends.1.authz = internal +auth_backends.2 = internal", +[{rabbit, [ + {auth_backends, [{rabbit_auth_backend_ldap, rabbit_auth_backend_internal}, + rabbit_auth_backend_internal]} + ] + }],[]} +, + + +{8, +"ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem +ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem +ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem +ssl_options.verify = verify_peer +ssl_options.fail_if_no_peer_cert = true", +[ + {rabbit, [{ssl_options, [{cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"}, + {certfile, "test/config_schema_SUITE_data/certs/cert.pem"}, + {keyfile, "test/config_schema_SUITE_data/certs/key.pem"}, + {verify, verify_peer}, + {fail_if_no_peer_cert, true}]} + ]}],[]} +, + +{9, +"listeners.tcp.default = 5673", +[{rabbit, [{tcp_listeners, [5673]}]}],[]} +, + +{10, +"listeners.ssl = none", +[{rabbit, [{ssl_listeners, []}]}],[]} +, +{11, +"num_acceptors.ssl = 1", +[{rabbit, [{num_ssl_acceptors, 1}]}],[]} +, +{12, 
+"default_user = guest +default_pass = guest +default_user_tags.administrator = true +default_permissions.configure = .* +default_permissions.read = .* +default_permissions.write = .*", +[{rabbit, [ +{default_user, <<"guest">>}, +{default_pass, <<"guest">>}, +{default_user_tags, [administrator]}, +{default_permissions, [<<".*">>, <<".*">>, <<".*">>]}]}],[]} +, +{13, +"cluster_nodes.disc.1 = rabbit@hostname1 +cluster_nodes.disc.2 = rabbit@hostname2", +[{rabbit, [ + {cluster_nodes, {[rabbit@hostname2,rabbit@hostname1], disc}} +]}],[]} +, +{14, +"tcp_listen_options.backlog = 128 +tcp_listen_options.nodelay = true +tcp_listen_options.exit_on_close = false", +[{rabbit, [{tcp_listen_options, [{backlog, 128}, +{nodelay, true}, +{exit_on_close, false}]}]}],[]} +, +{15, +"auth_backends.1.authn = ldap +auth_backends.1.authz = internal +auth_backends.2 = internal", +[{rabbit,[{auth_backends, [{rabbit_auth_backend_ldap, rabbit_auth_backend_internal}, + rabbit_auth_backend_internal]}]}],[]} +, +{16, +"rabbitmq_auth_backend_ldap.servers.1 = some_server + rabbitmq_auth_backend_ldap.servers.2 = some_other_server", +[{rabbitmq_auth_backend_ldap, [{servers, ["some_server", "some_other_server"]}]}], +[rabbitmq_auth_backend_ldap]} +, +{17, +"rabbitmq_auth_backend_ldap.dn_lookup_attribute = userPrincipalName +rabbitmq_auth_backend_ldap.dn_lookup_base = DC=gopivotal,DC=com +rabbitmq_auth_backend_ldap.dn_lookup_bind = as_user", +[{rabbitmq_auth_backend_ldap, [{dn_lookup_attribute, "userPrincipalName"}, +{dn_lookup_base, "DC=gopivotal,DC=com"}, +{dn_lookup_bind, as_user}]}], +[rabbitmq_auth_backend_ldap]} +, +{18, +"rabbitmq_auth_backend_ldap.dn_lookup_bind.user_dn = username +rabbitmq_auth_backend_ldap.dn_lookup_bind.password = password", +[{rabbitmq_auth_backend_ldap, [ +{dn_lookup_bind, {"username", "password"}}]}], +[rabbitmq_auth_backend_ldap]} +, +{19, +"rabbitmq_auth_backend_ldap.other_bind = anon", +[{rabbitmq_auth_backend_ldap, [{other_bind, anon}]}], 
+[rabbitmq_auth_backend_ldap]} +, +{20, +"rabbitmq_auth_backend_ldap.other_bind = as_user", +[{rabbitmq_auth_backend_ldap, [{other_bind, as_user}]}], +[rabbitmq_auth_backend_ldap]} +, +{21, +"rabbitmq_auth_backend_ldap.other_bind.user_dn = username +rabbitmq_auth_backend_ldap.other_bind.password = password", +[{rabbitmq_auth_backend_ldap, [{other_bind, {"username", "password"}}]}], +[rabbitmq_auth_backend_ldap]} +, +{22, +"listeners.tcp.default = 5672 +collect_statistics_interval = 10000 +management.http_log_dir = test/config_schema_SUITE_data/rabbit-mgmt +management.rates_mode = basic", +[{rabbit, [ {tcp_listeners, [5672]}, + {collect_statistics_interval, 10000}]}, + {rabbitmq_management, [ {http_log_dir, "test/config_schema_SUITE_data/rabbit-mgmt"}, + {rates_mode, basic}]} +], +[rabbitmq_management]} +, +{23, +"management.listener.port = 12345", +[{rabbitmq_management, [{listener, [{port, 12345}]}]}], +[rabbitmq_management]} +, +{24, +"management.listener.port = 15671 +management.listener.ssl = true +management.listener.ssl_opts.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem +management.listener.ssl_opts.certfile = test/config_schema_SUITE_data/certs/cert.pem +management.listener.ssl_opts.keyfile = test/config_schema_SUITE_data/certs/key.pem", +[{rabbitmq_management, + [{listener, [{port, 15671}, + {ssl, true}, + {ssl_opts, [{cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"}, + {certfile, "test/config_schema_SUITE_data/certs/cert.pem"}, + {keyfile, "test/config_schema_SUITE_data/certs/key.pem"}]} + ]} + ]} +], +[rabbitmq_management]} +, +{25, +"management.sample_retention_policies.global.minute = 5 +management.sample_retention_policies.global.hour = 60 +management.sample_retention_policies.global.day = 1200 + +management.sample_retention_policies.basic.minute = 5 +management.sample_retention_policies.basic.hour = 60 + +management.sample_retention_policies.detailed.10 = 5", +[{rabbitmq_management,[ + {sample_retention_policies, + %% List 
of {MaxAgeInSeconds, SampleEveryNSeconds} + [{global, [{60, 5}, {3600, 60}, {86400, 1200}]}, + {basic, [{60, 5}, {3600, 60}]}, + {detailed, [{10, 5}]}]} +]}], +[rabbitmq_management]} +, +{26, +"vm_memory_high_watermark.absolute = 1073741824", +[{rabbit, [{vm_memory_high_watermark, {absolute, 1073741824}}]}],[]} +, +{27, +"vm_memory_high_watermark.absolute = 1024MB", +[{rabbit, [{vm_memory_high_watermark, {absolute, "1024MB"}}]}],[]} +, +{28, +"vm_memory_high_watermark_paging_ratio = 0.75 +vm_memory_high_watermark.relative = 0.4", +[{rabbit, [{vm_memory_high_watermark_paging_ratio, 0.75}, + {vm_memory_high_watermark, 0.4}]}],[]} +, +{29, +"listeners.tcp.default = 5672 +mqtt.default_user = guest +mqtt.default_pass = guest +mqtt.allow_anonymous = true +mqtt.vhost = / +mqtt.exchange = amq.topic +mqtt.subscription_ttl = 1800000 +mqtt.prefetch = 10 +mqtt.listeners.ssl = none +## Default MQTT with TLS port is 8883 +# mqtt.listeners.ssl.default = 8883 +mqtt.listeners.tcp.default = 1883 +mqtt.tcp_listen_options.backlog = 128 +mqtt.tcp_listen_options.nodelay = true", +[{rabbit, [{tcp_listeners, [5672]}]}, + {rabbitmq_mqtt, [{default_user, <<"guest">>}, + {default_pass, <<"guest">>}, + {allow_anonymous, true}, + {vhost, <<"/">>}, + {exchange, <<"amq.topic">>}, + {subscription_ttl, 1800000}, + {prefetch, 10}, + {ssl_listeners, []}, + %% Default MQTT with TLS port is 8883 + %% {ssl_listeners, [8883]} + {tcp_listeners, [1883]}, + {tcp_listen_options, [{backlog, 128}, + {nodelay, true}]}]} + ], +[rabbitmq_mqtt]} +, +{30, +"ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem +ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem +ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem +ssl_options.verify = verify_peer +ssl_options.fail_if_no_peer_cert = true + +mqtt.listeners.ssl.default = 8883 +mqtt.listeners.tcp.default = 1883", +[{rabbit, [ + {ssl_options, [{cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"}, + {certfile, 
"test/config_schema_SUITE_data/certs/cert.pem"}, + {keyfile, "test/config_schema_SUITE_data/certs/key.pem"}, + {verify, verify_peer}, + {fail_if_no_peer_cert, true}]} + ]}, + {rabbitmq_mqtt, [ + {ssl_listeners, [8883]}, + {tcp_listeners, [1883]} + ]} + ], +[rabbitmq_mqtt]} +, +{31, +"mqtt.ssl_cert_login = true", +[{rabbitmq_mqtt, [{ssl_cert_login, true}]}], [rabbitmq_mqtt]} +, + +{32, +"ssl_cert_login_from = common_name", +[{rabbit, [{ssl_cert_login_from, common_name}]}], [rabbitmq_mqtt]} +, + + +{33, +"listeners.tcp.default = 5672 +mqtt.default_user = guest +mqtt.default_pass = guest +mqtt.allow_anonymous = true +mqtt.vhost = / +mqtt.exchange = amq.topic +mqtt.subscription_ttl = undefined +mqtt.prefetch = 10", +[{rabbit, [{tcp_listeners, [5672]}]}, + {rabbitmq_mqtt, [{default_user, <<"guest">>}, + {default_pass, <<"guest">>}, + {allow_anonymous, true}, + {vhost, <<"/">>}, + {exchange, <<"amq.topic">>}, + {subscription_ttl, undefined}, + {prefetch, 10}]} + ], +[rabbitmq_mqtt]} +, +{34, +"mqtt.default_user = guest +mqtt.default_pass = guest +mqtt.allow_anonymous = true +mqtt.vhost = / +mqtt.exchange = amq.topic +mqtt.subscription_ttl = 1800000 +mqtt.prefetch = 10 +## use DETS (disk-based) store for retained messages +mqtt.retained_message_store = rabbit_mqtt_retained_msg_store_dets +## only used by DETS store +mqtt.retained_message_store_dets_sync_interval = 2000 + +mqtt.listeners.ssl = none +mqtt.listeners.tcp.default = 1883", +[{rabbitmq_mqtt, [{default_user, <<"guest">>}, + {default_pass, <<"guest">>}, + {allow_anonymous, true}, + {vhost, <<"/">>}, + {exchange, <<"amq.topic">>}, + {subscription_ttl, 1800000}, + {prefetch, 10}, + %% use DETS (disk-based) store for retained messages + {retained_message_store, rabbit_mqtt_retained_msg_store_dets}, + %% only used by DETS store + {retained_message_store_dets_sync_interval, 2000}, + {ssl_listeners, []}, + {tcp_listeners, [1883]}]} + ], +[rabbitmq_mqtt]} +, + +{35, +"listeners.tcp.1 = 192.168.1.99:5672", +[ + {rabbit, [ 
+ {tcp_listeners, [{"192.168.1.99", 5672}]} + ]} +], []} +, +{36, +"listeners.tcp.1 = 127.0.0.1:5672 +listeners.tcp.2 = ::1:5672", +[ + {rabbit, [ + {tcp_listeners, [{"127.0.0.1", 5672}, + {"::1", 5672}]} + ]} +], []} +, +{37, +"listeners.tcp.1 = :::5672", +[ + {rabbit, [ + {tcp_listeners, [{"::", 5672}]} + ]} +], []} +, +{38, +"listeners.tcp.1 = 192.168.1.99:5672", +[ + {rabbit, [ + {tcp_listeners, [{"192.168.1.99", 5672}]} + ]} +], []} +, +{39, +"listeners.tcp.1 = fe80::2acf:e9ff:fe17:f97b:5672", +[ + {rabbit, [ + {tcp_listeners, [{"fe80::2acf:e9ff:fe17:f97b", 5672}]} + ]} +], []} +, +{40, +"tcp_listen_options.backlog = 128 +tcp_listen_options.nodelay = true +tcp_listen_options.sndbuf = 196608 +tcp_listen_options.recbuf = 196608", +[ + {rabbit, [ + {tcp_listen_options, [ + {backlog, 128}, + {nodelay, true}, + {sndbuf, 196608}, + {recbuf, 196608} + ]} + ]} +], []} +, + +{42, +"tcp_listen_options.backlog = 4096 +tcp_listen_options.nodelay = true", +[ + {kernel, [ + {inet_default_connect_options, [{nodelay, true}]}, + {inet_default_listen_options, [{nodelay, true}]} + ]}] +, +[ + {kernel, [ + {inet_default_connect_options, [{nodelay, true}]}, + {inet_default_listen_options, [{nodelay, true}]} + ]}, + {rabbit, [ + {tcp_listen_options, [ + {backlog, 4096}, + {nodelay, true} + ]} + ]} +], []} +, + +{43, +"tcp_listen_options.backlog = 4096 +tcp_listen_options.nodelay = true", +[ + {rabbit, [ + {tcp_listen_options, [ + {backlog, 4096}, + {nodelay, true} + ]} + ]} +], []} +, + +{44, +"ssl_handshake_timeout = 10000", +[ + {rabbit, [ + %% 10 seconds + {ssl_handshake_timeout, 10000} + ]} +], []} +, + +{45, +"cluster_partition_handling = pause_if_all_down + +## Recover strategy. 
Can be either 'autoheal' or 'ignore' +cluster_partition_handling.pause_if_all_down.recover = ignore + +## Node names to check +cluster_partition_handling.pause_if_all_down.nodes.1 = rabbit@myhost1 +cluster_partition_handling.pause_if_all_down.nodes.2 = rabbit@myhost2", +[{rabbit, [{cluster_partition_handling, {pause_if_all_down, [rabbit@myhost2, rabbit@myhost1], ignore}}]}], []} +, +{46, +"cluster_partition_handling = autoheal", +[{rabbit, [{cluster_partition_handling, autoheal}]}], []} +, +{47, +"password_hashing_module = rabbit_password_hashing_sha512", +[ + {rabbit, [{password_hashing_module, rabbit_password_hashing_sha512}]} +],[]} +, + +{48, +"listeners.ssl.1 = 5671 +ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem +ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem +ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem +ssl_options.verify = verify_peer +ssl_options.fail_if_no_peer_cert = false" +, +[ + {rabbit, [ + {ssl_listeners, [5671]}, + {ssl_options, [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, + {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, + {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, + {verify,verify_peer}, + {fail_if_no_peer_cert,false}]} + ]} +],[]} +, + + +{49, +"listeners.ssl.1 = 5671 +ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem +ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem +ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem +ssl_options.password = t0p$3kRe7", +[ + {rabbit, [ + {ssl_listeners, [5671]}, + {ssl_options, [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, + {certfile, "test/config_schema_SUITE_data/certs/cert.pem"}, + {keyfile, "test/config_schema_SUITE_data/certs/key.pem"}, + {password, "t0p$3kRe7"} + ]} + ]} +],[]} +, + +{50, +"listeners.ssl.1 = 5671 +ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem +ssl_options.certfile = 
test/config_schema_SUITE_data/certs/cert.pem +ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem +ssl_options.versions.tls1_2 = tlsv1.2 +ssl_options.versions.tls1_1 = tlsv1.1 +ssl_options.versions.tls1 = tlsv1", +[{ssl, [{versions, ['tlsv1.2', 'tlsv1.1', tlsv1]}]}], +[{ssl, [{versions, ['tlsv1.2', 'tlsv1.1', tlsv1]}]}, + {rabbit, [ + {ssl_listeners, [5671]}, + {ssl_options, [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, + {certfile, "test/config_schema_SUITE_data/certs/cert.pem"}, + {keyfile, "test/config_schema_SUITE_data/certs/key.pem"}, + {versions, ['tlsv1.2', 'tlsv1.1', tlsv1]} + ]} + ]} +],[]} +, +{51, +"listeners.ssl.1 = 5671 +ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem +ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem +ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem +ssl_options.versions.tls1_2 = tlsv1.2 +ssl_options.versions.tls1_1 = tlsv1.1", +[{ssl, [{versions, ['tlsv1.2', 'tlsv1.1']}]}], +[ + {ssl, [{versions, ['tlsv1.2', 'tlsv1.1']}]}, + {rabbit, [ + {ssl_listeners, [5671]}, + {ssl_options, [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, + {certfile, "test/config_schema_SUITE_data/certs/cert.pem"}, + {keyfile, "test/config_schema_SUITE_data/certs/key.pem"}, + {versions, ['tlsv1.2', 'tlsv1.1']} + ]} + ]} +],[]} +, +{52, +"listeners.ssl.1 = 5671 +ssl_allow_poodle_attack = true +ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem +ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem +ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem +ssl_options.verify = verify_peer +ssl_options.fail_if_no_peer_cert = false", +[ + {rabbit, [ + {ssl_listeners, [5671]}, + {ssl_allow_poodle_attack, true}, + {ssl_options, [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, + {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, + {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, + 
{verify,verify_peer}, + {fail_if_no_peer_cert,false}]} + ]} +],[]} +, +{53, +"listeners.ssl.1 = 5671 +ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem +ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem +ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem +ssl_options.depth = 2 +ssl_options.verify = verify_peer +ssl_options.fail_if_no_peer_cert = false", +[ + {rabbit, [ + {ssl_listeners, [5671]}, + {ssl_options, [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, + {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, + {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, + {depth, 2}, + {verify,verify_peer}, + {fail_if_no_peer_cert,false}]} + ]} +],[]} +, +{54, +"stomp.listeners.tcp.1 = 12345", +[{rabbitmq_stomp, [{tcp_listeners, [12345]}]}],[rabbitmq_stomp]} +, +{55, +"stomp.listeners.tcp.1 = 127.0.0.1:61613 +stomp.listeners.tcp.2 = ::1:61613", +[{rabbitmq_stomp, [{tcp_listeners, [{"127.0.0.1", 61613}, + {"::1", 61613}]}]}],[rabbitmq_stomp]} +, +{56, +"ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem +ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem +ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem +ssl_options.verify = verify_peer +ssl_options.fail_if_no_peer_cert = true + +stomp.listeners.tcp.1 = 61613 +stomp.listeners.ssl.1 = 61614", +[{rabbit,[ +{ssl_options, [{cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"}, + {certfile, "test/config_schema_SUITE_data/certs/cert.pem"}, + {keyfile, "test/config_schema_SUITE_data/certs/key.pem"}, + {verify, verify_peer}, + {fail_if_no_peer_cert, true}]}]}, +{rabbitmq_stomp, [{tcp_listeners, [61613]}, +{ssl_listeners, [61614]}]} + ],[]} +, + +{57, +"stomp.default_user = guest +stomp.default_pass = guest", +[{rabbitmq_stomp, [{default_user, [{login, "guest"},{passcode, "guest"}]}]}], +[rabbitmq_stomp]} +, +{58, +"stomp.ssl_cert_login = true", +[{rabbitmq_stomp, [{ssl_cert_login, 
true}]}], +[rabbitmq_stomp]} +, +{59, +"ssl_cert_login_from = common_name", +[{rabbit, [{ssl_cert_login_from, common_name}]}], []} +, +{60, +"stomp.default_user = guest +stomp.default_pass = guest +stomp.implicit_connect = true", +[{rabbitmq_stomp, [{default_user,[{login, "guest"}, {passcode, "guest"}]},{implicit_connect, true}]}], +[rabbitmq_stomp]} +, +{61, +"stomp.default_vhost = /", +[{rabbitmq_stomp, [{default_vhost, <<"/">>}]}], +[rabbitmq_stomp]} +, +{62, +"management.listener.port = 15672 +management.listener.ip = 127.0.0.1", +[{rabbitmq_management, + [{listener, [{port, 15672}, + {ip, "127.0.0.1"} + ]} + ]} +], +[rabbitmq_management]} +, +{63, +"management.listener.port = 15672 +management.listener.ssl = true + +management.listener.ssl_opts.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem +management.listener.ssl_opts.certfile = test/config_schema_SUITE_data/certs/cert.pem +management.listener.ssl_opts.keyfile = test/config_schema_SUITE_data/certs/key.pem", +[{rabbitmq_management, + [{listener, [{port, 15672}, + {ssl, true}, + {ssl_opts, [{cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"}, + {certfile, "test/config_schema_SUITE_data/certs/cert.pem"}, + {keyfile, "test/config_schema_SUITE_data/certs/key.pem"}]} + ]} + ]} +], +[rabbitmq_management]}, +{64, +"web_stomp.port = 12345", +[{rabbitmq_web_stomp, [{port, 12345}]}], +[rabbitmq_web_stomp]}, +{65, +"web_stomp.ssl.port = 15671 +web_stomp.ssl.backlog = 1024 +web_stomp.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem +web_stomp.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem +web_stomp.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem +web_stomp.ssl.password = changeme", +[{rabbitmq_web_stomp, + [{ssl_config, [{port, 15671}, + {backlog, 1024}, + {certfile, "test/config_schema_SUITE_data/certs/cert.pem"}, + {keyfile, "test/config_schema_SUITE_data/certs/key.pem"}, + {cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"}, + {password, 
"changeme"}]}]}], +[rabbitmq_web_stomp]}, +{66, +"web_stomp.ws_frame = binary", +[{rabbitmq_web_stomp, [{ws_frame, binary}]}], +[rabbitmq_web_stomp]}, +{67, +"web_stomp.cowboy_opts.max_keepalive = 10", +[{rabbitmq_web_stomp,[{cowboy_opts, [{max_keepalive, 10}]}]}], +[rabbitmq_web_stomp]}, +{68, +"web_stomp.sockjs_opts.url = https://cdn.jsdelivr.net/sockjs/0.3.4/sockjs.min.js", +[{rabbitmq_web_stomp, + [{sockjs_opts, [{sockjs_url, "https://cdn.jsdelivr.net/sockjs/0.3.4/sockjs.min.js"}]}]}], +[rabbitmq_web_stomp]}, +{69, +"auth_backends.1 = http +rabbitmq_auth_backend_http.user_path = http://some-server/auth/user +rabbitmq_auth_backend_http.vhost_path = http://some-server/auth/vhost +rabbitmq_auth_backend_http.resource_path = http://some-server/auth/resource", +[{rabbit, [{auth_backends, [rabbit_auth_backend_http]}]}, + {rabbitmq_auth_backend_http, + [{user_path, "http://some-server/auth/user"}, + {vhost_path, "http://some-server/auth/vhost"}, + {resource_path, "http://some-server/auth/resource"}]}], +[rabbitmq_auth_backend_http]} +]. diff --git a/test/dummy_interceptor.erl b/test/dummy_interceptor.erl new file mode 100644 index 000000000000..6d510a307306 --- /dev/null +++ b/test/dummy_interceptor.erl @@ -0,0 +1,26 @@ +-module(dummy_interceptor). + +-behaviour(rabbit_channel_interceptor). + +-include_lib("rabbit_common/include/rabbit.hrl"). +-include_lib("rabbit_common/include/rabbit_framing.hrl"). + + +-compile(export_all). + +init(_Ch) -> + undefined. + +description() -> + [{description, + <<"Empties payload on publish">>}]. + +intercept(#'basic.publish'{} = Method, Content, _IState) -> + Content2 = Content#content{payload_fragments_rev = []}, + {Method, Content2}; + +intercept(Method, Content, _VHost) -> + {Method, Content}. + +applies_to() -> + ['basic.publish']. 
diff --git a/test/plugin_versioning_SUITE.erl b/test/plugin_versioning_SUITE.erl new file mode 100644 index 000000000000..7fcfe433e007 --- /dev/null +++ b/test/plugin_versioning_SUITE.erl @@ -0,0 +1,177 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(plugin_versioning_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile(export_all). + +all() -> + [ + {group, parallel_tests} + ]. + +groups() -> + [ + {parallel_tests, [parallel], [ + version_support, + plugin_validation + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. 
+%% ------------------------------------------------------------------- + +version_support(_Config) -> + Examples = [ + {[], "any version", true} %% anything goes + ,{[], "0.0.0", true} %% ditto + ,{[], "3.5.6", true} %% ditto + ,{["something"], "something", true} %% equal values match + ,{["3.5.4"], "something", false} + ,{["3.4.5", "3.6.0"], "0.0.0", true} %% zero version always match + ,{["3.4.5", "3.6.0"], "", true} %% empty version always match + ,{["something", "3.5.6"], "3.5.7", true} %% 3.5.7 matches ~> 3.5.6 + ,{["3.4.0", "3.5.6"], "3.6.1", false} %% 3.6.x isn't supported + ,{["3.5.2", "3.6.1", "3.7.1"], "3.5.2", true} %% 3.5.2 matches ~> 3.5.2 + ,{["3.5.2", "3.6.1", "3.7.1"], "3.5.1", false} %% lesser than the lower boundary + ,{["3.5.2", "3.6.1", "3.7.1"], "3.6.2", true} %% 3.6.2 matches ~> 3.6.1 + ,{["3.5.2", "3.6.1", "3.6.8"], "3.6.2", true} %% 3.6.2 still matches ~> 3.6.1 + ,{["3.5", "3.6", "3.7"], "3.5.1", false} %% x.y values are not supported + ,{["3"], "3.5.1", false} %% x values are not supported + ,{["3.5.2", "3.6.1"], "3.6.2.999", true} %% x.y.z.p values are supported + ,{["3.5.2", "3.6.2.333"], "3.6.2.999", true} %% x.y.z.p values are supported + ,{["3.5.2", "3.6.2.333"], "3.6.2.222", false} %% x.y.z.p values are supported + ], + + lists:foreach( + fun({Versions, RabbitVersion, Expected}) -> + {Expected, RabbitVersion, Versions} = + {rabbit_plugins:is_version_supported(RabbitVersion, Versions), + RabbitVersion, Versions} + end, + Examples), + ok. + +-record(validation_example, {rabbit_version, plugins, errors, valid}). 
+ +plugin_validation(_Config) -> + Examples = [ + #validation_example{ + rabbit_version = "3.7.1", + plugins = + [{plugin_a, "3.7.2", ["3.5.6", "3.7.1"], []}, + {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.6.3", "3.7.1"]}]}], + errors = [], + valid = [plugin_a, plugin_b]}, + + #validation_example{ + rabbit_version = "3.7.1", + plugins = + [{plugin_a, "3.7.1", ["3.7.6"], []}, + {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.6.3", "3.7.0"]}]}], + errors = + [{plugin_a, [{broker_version_mismatch, "3.7.1", ["3.7.6"]}]}, + {plugin_b, [{missing_dependency, plugin_a}]}], + valid = [] + }, + + #validation_example{ + rabbit_version = "3.7.1", + plugins = + [{plugin_a, "3.7.1", ["3.7.6"], []}, + {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.7.0"]}]}, + {plugin_c, "3.7.2", ["3.7.0"], [{plugin_b, ["3.7.3"]}]}], + errors = + [{plugin_a, [{broker_version_mismatch, "3.7.1", ["3.7.6"]}]}, + {plugin_b, [{missing_dependency, plugin_a}]}, + {plugin_c, [{missing_dependency, plugin_b}]}], + valid = [] + }, + + #validation_example{ + rabbit_version = "3.7.1", + plugins = + [{plugin_a, "3.7.1", ["3.7.1"], []}, + {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.7.3"]}]}, + {plugin_d, "3.7.2", ["3.7.0"], [{plugin_c, ["3.7.3"]}]}], + errors = + [{plugin_b, [{{dependency_version_mismatch, "3.7.1", ["3.7.3"]}, plugin_a}]}, + {plugin_d, [{missing_dependency, plugin_c}]}], + valid = [plugin_a] + }, + #validation_example{ + rabbit_version = "0.0.0", + plugins = + [{plugin_a, "", ["3.7.1"], []}, + {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.7.3"]}]}], + errors = [], + valid = [plugin_a, plugin_b] + }], + lists:foreach( + fun(#validation_example{rabbit_version = RabbitVersion, + plugins = PluginsExamples, + errors = Errors, + valid = ExpectedValid}) -> + Plugins = make_plugins(PluginsExamples), + {Valid, Invalid} = rabbit_plugins:validate_plugins(Plugins, + RabbitVersion), + Errors = lists:reverse(Invalid), + ExpectedValid = lists:reverse(lists:map(fun(#plugin{name = Name}) -> + Name + end, + 
Valid)) + end, + Examples), + ok. + +make_plugins(Plugins) -> + lists:map( + fun({Name, Version, RabbitVersions, PluginsVersions}) -> + Deps = [K || {K,_V} <- PluginsVersions], + #plugin{name = Name, + version = Version, + dependencies = Deps, + broker_version_requirements = RabbitVersions, + dependency_version_requirements = PluginsVersions} + end, + Plugins). diff --git a/test/unit_SUITE.erl b/test/unit_SUITE.erl index ba0f43f11e34..30bf4d937cf9 100644 --- a/test/unit_SUITE.erl +++ b/test/unit_SUITE.erl @@ -728,8 +728,9 @@ version_equivalance(_Config) -> true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0"), true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.1"), true = rabbit_misc:version_minor_equivalent("%%VSN%%", "%%VSN%%"), + % Support for 4-number versions + true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0.1"), false = rabbit_misc:version_minor_equivalent("3.0.0", "3.1.0"), false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0"), - false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0.1"), false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.foo"), passed. diff --git a/test/unit_inbroker_SUITE.erl b/test/unit_inbroker_SUITE.erl index dfde1fba220f..5e521df105c5 100644 --- a/test/unit_inbroker_SUITE.erl +++ b/test/unit_inbroker_SUITE.erl @@ -116,7 +116,7 @@ groups() -> log_management, %% Check log files. log_management_during_startup, %% Check log files. memory_high_watermark, %% Trigger alarm. - rotate_logs_without_suffix, %% Check log files. + externally_rotated_logs_are_automatically_reopened, %% Check log files. server_status %% Trigger alarm. ]}, {backing_queue_tests, [], [ @@ -1834,69 +1834,74 @@ log_management(Config) -> ?MODULE, log_management1, [Config]). 
log_management1(_Config) -> - override_group_leader(), + [LogFile] = rabbit:log_locations(), + Suffix = ".0", - MainLog = rabbit:log_location(kernel), - SaslLog = rabbit:log_location(sasl), - Suffix = ".1", - - ok = test_logs_working(MainLog, SaslLog), + ok = test_logs_working([LogFile]), %% prepare basic logs - file:delete([MainLog, Suffix]), - file:delete([SaslLog, Suffix]), - - %% simple logs reopening - ok = control_action(rotate_logs, []), - ok = test_logs_working(MainLog, SaslLog), + file:delete(LogFile ++ Suffix), + ok = test_logs_working([LogFile]), %% simple log rotation - ok = control_action(rotate_logs, [Suffix]), - [true, true] = non_empty_files([[MainLog, Suffix], [SaslLog, Suffix]]), - [true, true] = empty_files([MainLog, SaslLog]), - ok = test_logs_working(MainLog, SaslLog), - - %% reopening logs with log rotation performed first - ok = clean_logs([MainLog, SaslLog], Suffix), ok = control_action(rotate_logs, []), - ok = file:rename(MainLog, [MainLog, Suffix]), - ok = file:rename(SaslLog, [SaslLog, Suffix]), - ok = test_logs_working([MainLog, Suffix], [SaslLog, Suffix]), + %% FIXME: rabbit:rotate_logs/0 is asynchronous due to a limitation + %% in Lager. Therefore, we have no choice but to wait an arbitrary + %% amount of time. 
+ timer:sleep(2000), + [true, true] = non_empty_files([LogFile ++ Suffix, LogFile]), + ok = test_logs_working([LogFile]), + + %% log rotation on empty files + ok = clean_logs([LogFile], Suffix), ok = control_action(rotate_logs, []), - ok = test_logs_working(MainLog, SaslLog), + timer:sleep(2000), + [{error, enoent}, true] = non_empty_files([LogFile ++ Suffix, LogFile]), - %% log rotation on empty files (the main log will have a ctl action logged) - ok = clean_logs([MainLog, SaslLog], Suffix), + %% logs with suffix are not writable + ok = control_action(rotate_logs, []), + timer:sleep(2000), + ok = make_files_non_writable([LogFile ++ Suffix]), ok = control_action(rotate_logs, []), - ok = control_action(rotate_logs, [Suffix]), - [false, true] = empty_files([[MainLog, Suffix], [SaslLog, Suffix]]), + timer:sleep(2000), + ok = test_logs_working([LogFile]), - %% logs with suffix are not writable - ok = control_action(rotate_logs, [Suffix]), - ok = make_files_non_writable([[MainLog, Suffix], [SaslLog, Suffix]]), - ok = control_action(rotate_logs, [Suffix]), - ok = test_logs_working(MainLog, SaslLog), + %% rotate when original log files are not writable + ok = make_files_non_writable([LogFile]), + ok = control_action(rotate_logs, []), + timer:sleep(2000), %% logging directed to tty (first, remove handlers) - ok = delete_log_handlers([rabbit_sasl_report_file_h, - rabbit_error_logger_file_h]), - ok = clean_logs([MainLog, SaslLog], Suffix), - ok = application:set_env(rabbit, sasl_error_logger, tty), - ok = application:set_env(rabbit, error_logger, tty), - ok = control_action(rotate_logs, []), - [{error, enoent}, {error, enoent}] = empty_files([MainLog, SaslLog]), + ok = control_action(stop_app, []), + ok = clean_logs([LogFile], Suffix), + ok = application:set_env(rabbit, lager_handler, tty), + application:unset_env(lager, handlers), + application:unset_env(lager, extra_sinks), + ok = control_action(start_app, []), + timer:sleep(200), + rabbit_log:info("test info"), + [{error, 
enoent}] = empty_files([LogFile]), %% rotate logs when logging is turned off - ok = application:set_env(rabbit, sasl_error_logger, false), - ok = application:set_env(rabbit, error_logger, silent), - ok = control_action(rotate_logs, []), - [{error, enoent}, {error, enoent}] = empty_files([MainLog, SaslLog]), + ok = control_action(stop_app, []), + ok = clean_logs([LogFile], Suffix), + ok = application:set_env(rabbit, lager_handler, false), + application:unset_env(lager, handlers), + application:unset_env(lager, extra_sinks), + ok = control_action(start_app, []), + timer:sleep(200), + rabbit_log:error("test error"), + timer:sleep(200), + [{error, enoent}] = empty_files([LogFile]), %% cleanup - ok = application:set_env(rabbit, sasl_error_logger, {file, SaslLog}), - ok = application:set_env(rabbit, error_logger, {file, MainLog}), - ok = add_log_handlers([{rabbit_error_logger_file_h, MainLog}, - {rabbit_sasl_report_file_h, SaslLog}]), + ok = control_action(stop_app, []), + ok = clean_logs([LogFile], Suffix), + ok = application:set_env(rabbit, lager_handler, LogFile), + application:unset_env(lager, handlers), + application:unset_env(lager, extra_sinks), + ok = control_action(start_app, []), + ok = test_logs_working([LogFile]), passed. log_management_during_startup(Config) -> @@ -1904,137 +1909,111 @@ log_management_during_startup(Config) -> ?MODULE, log_management_during_startup1, [Config]). 
log_management_during_startup1(_Config) -> - MainLog = rabbit:log_location(kernel), - SaslLog = rabbit:log_location(sasl), + [LogFile] = rabbit:log_locations(), + Suffix = ".0", %% start application with simple tty logging ok = control_action(stop_app, []), - ok = application:set_env(rabbit, error_logger, tty), - ok = application:set_env(rabbit, sasl_error_logger, tty), - ok = add_log_handlers([{error_logger_tty_h, []}, - {sasl_report_tty_h, []}]), + ok = clean_logs([LogFile], Suffix), + ok = application:set_env(rabbit, lager_handler, tty), + application:unset_env(lager, handlers), + application:unset_env(lager, extra_sinks), ok = control_action(start_app, []), - %% start application with tty logging and - %% proper handlers not installed - ok = control_action(stop_app, []), - ok = error_logger:tty(false), - ok = delete_log_handlers([sasl_report_tty_h]), - ok = case catch control_action(start_app, []) of - ok -> exit({got_success_but_expected_failure, - log_rotation_tty_no_handlers_test}); - {badrpc, {'EXIT', {error, - {cannot_log_to_tty, _, not_installed}}}} -> ok - end, - - %% fix sasl logging - ok = application:set_env(rabbit, sasl_error_logger, {file, SaslLog}), - %% start application with logging to non-existing directory - TmpLog = "/tmp/rabbit-tests/test.log", - delete_file(TmpLog), + NonExistent = "/tmp/non-existent/test.log", + delete_file(NonExistent), + delete_file(filename:dirname(NonExistent)), ok = control_action(stop_app, []), - ok = application:set_env(rabbit, error_logger, {file, TmpLog}), - - ok = delete_log_handlers([rabbit_error_logger_file_h]), - ok = add_log_handlers([{error_logger_file_h, MainLog}]), + ok = application:set_env(rabbit, lager_handler, NonExistent), + application:unset_env(lager, handlers), + application:unset_env(lager, extra_sinks), ok = control_action(start_app, []), %% start application with logging to directory with no %% write permissions ok = control_action(stop_app, []), - TmpDir = "/tmp/rabbit-tests", - ok = 
set_permissions(TmpDir, 8#00400), - ok = delete_log_handlers([rabbit_error_logger_file_h]), - ok = add_log_handlers([{error_logger_file_h, MainLog}]), + NoPermission1 = "/var/empty/test.log", + delete_file(NoPermission1), + delete_file(filename:dirname(NoPermission1)), + ok = control_action(stop_app, []), + ok = application:set_env(rabbit, lager_handler, NoPermission1), + application:unset_env(lager, handlers), + application:unset_env(lager, extra_sinks), ok = case control_action(start_app, []) of ok -> exit({got_success_but_expected_failure, log_rotation_no_write_permission_dir_test}); - {badrpc, {'EXIT', - {error, {cannot_log_to_file, _, _}}}} -> ok + {badrpc, + {'EXIT', {error, {cannot_log_to_file, _, Reason1}}}} + when Reason1 =:= enoent orelse Reason1 =:= eacces -> ok; + {badrpc, + {'EXIT', + {error, {cannot_log_to_file, _, + {cannot_create_parent_dirs, _, Reason1}}}}} + when Reason1 =:= eperm orelse + Reason1 =:= eacces orelse + Reason1 =:= enoent-> ok end, %% start application with logging to a subdirectory which %% parent directory has no write permissions - ok = control_action(stop_app, []), - TmpTestDir = "/tmp/rabbit-tests/no-permission/test/log", - ok = application:set_env(rabbit, error_logger, {file, TmpTestDir}), - ok = add_log_handlers([{error_logger_file_h, MainLog}]), + NoPermission2 = "/var/empty/non-existent/test.log", + delete_file(NoPermission2), + delete_file(filename:dirname(NoPermission2)), + case control_action(stop_app, []) of + ok -> ok; + {error, lager_not_running} -> ok + end, + ok = application:set_env(rabbit, lager_handler, NoPermission2), + application:unset_env(lager, handlers), + application:unset_env(lager, extra_sinks), ok = case control_action(start_app, []) of ok -> exit({got_success_but_expected_failure, log_rotatation_parent_dirs_test}); + {badrpc, + {'EXIT', {error, {cannot_log_to_file, _, Reason2}}}} + when Reason2 =:= enoent orelse Reason2 =:= eacces -> ok; {badrpc, {'EXIT', {error, {cannot_log_to_file, _, - {error, - 
{cannot_create_parent_dirs, _, eacces}}}}}} -> ok + {cannot_create_parent_dirs, _, Reason2}}}}} + when Reason2 =:= eperm orelse + Reason2 =:= eacces orelse + Reason2 =:= enoent-> ok end, - ok = set_permissions(TmpDir, 8#00700), - ok = set_permissions(TmpLog, 8#00600), - ok = delete_file(TmpLog), - ok = file:del_dir(TmpDir), - %% start application with standard error_logger_file_h - %% handler not installed - ok = control_action(stop_app, []), - ok = application:set_env(rabbit, error_logger, {file, MainLog}), - ok = control_action(start_app, []), - - %% start application with standard sasl handler not installed - %% and rabbit main log handler installed correctly - ok = control_action(stop_app, []), - ok = delete_log_handlers([rabbit_sasl_report_file_h]), + %% cleanup + ok = application:set_env(rabbit, lager_handler, LogFile), + application:unset_env(lager, handlers), + application:unset_env(lager, extra_sinks), ok = control_action(start_app, []), passed. -%% "rabbitmqctl rotate_logs" without additional parameters -%% shouldn't truncate files. -rotate_logs_without_suffix(Config) -> +externally_rotated_logs_are_automatically_reopened(Config) -> passed = rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, rotate_logs_without_suffix1, [Config]). - -rotate_logs_without_suffix1(_Config) -> - override_group_leader(), - - MainLog = rabbit:log_location(kernel), - SaslLog = rabbit:log_location(sasl), - Suffix = ".1", - file:delete(MainLog), - file:delete(SaslLog), + ?MODULE, externally_rotated_logs_are_automatically_reopened1, [Config]). 
- %% Empty log-files should be created - ok = control_action(rotate_logs, []), - [true, true] = empty_files([MainLog, SaslLog]), +externally_rotated_logs_are_automatically_reopened1(_Config) -> + [LogFile] = rabbit:log_locations(), - %% Write something to log files and simulate external log rotation - ok = test_logs_working(MainLog, SaslLog), - ok = file:rename(MainLog, [MainLog, Suffix]), - ok = file:rename(SaslLog, [SaslLog, Suffix]), + %% Make sure log file is opened + ok = test_logs_working([LogFile]), - %% Create non-empty files - TestData = "test-data\n", - file:write_file(MainLog, TestData), - file:write_file(SaslLog, TestData), + %% Move it away - i.e. external log rotation happened + file:rename(LogFile, [LogFile, ".rotation_test"]), - %% Nothing should be truncated - neither moved files which are still - %% opened by server, nor new log files that should be just reopened. - ok = control_action(rotate_logs, []), - [true, true, true, true] = - non_empty_files([MainLog, SaslLog, [MainLog, Suffix], - [SaslLog, Suffix]]), - - %% And log files should be re-opened - new log records should go to - %% new files. - ok = test_logs_working(MainLog, SaslLog), - true = (rabbit_file:file_size(MainLog) > length(TestData)), - true = (rabbit_file:file_size(SaslLog) > length(TestData)), + %% New files should be created - test_logs_working/1 will check that + %% LogFile is not empty after doing some logging. And it's exactly + %% what we need to check here. + ok = test_logs_working([LogFile]), passed. -override_group_leader() -> - %% Override group leader, otherwise SASL fake events are ignored by - %% the error_logger local to RabbitMQ. - {group_leader, Leader} = erlang:process_info(whereis(rabbit), group_leader), - erlang:group_leader(Leader, self()). +empty_or_nonexist_files(Files) -> + [case file:read_file_info(File) of + {ok, FInfo} -> FInfo#file_info.size == 0; + {error, enoent} -> true; + Error -> Error + end || File <- Files]. 
empty_files(Files) -> [case file:read_file_info(File) of @@ -2048,12 +2027,11 @@ non_empty_files(Files) -> _ -> not(EmptyFile) end || EmptyFile <- empty_files(Files)]. -test_logs_working(MainLogFile, SaslLogFile) -> - ok = rabbit_log:error("Log a test message~n"), - ok = error_logger:error_report(crash_report, [fake_crash_report, ?MODULE]), +test_logs_working(LogFiles) -> + ok = rabbit_log:error("Log a test message"), %% give the error loggers some time to catch up - timer:sleep(100), - [true, true] = non_empty_files([MainLogFile, SaslLogFile]), + timer:sleep(200), + lists:all(fun(LogFile) -> [true] =:= non_empty_files([LogFile]) end, LogFiles), ok. set_permissions(Path, Mode) -> From 3d6a790272be7ce4bca7fb8f3bf4d5a98a4b2cee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Fri, 27 May 2016 11:28:41 +0200 Subject: [PATCH 118/174] Makefile: Add several plugins to TEST_DEPS Those plugins have a Cuttlefish schema which is needed by config_schema_SUITE. References #725. [#116526487] --- Makefile | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/Makefile b/Makefile index 781da8de8037..e75263ca7087 100644 --- a/Makefile +++ b/Makefile @@ -67,6 +67,19 @@ DEPS += $(DISTRIBUTED_DEPS) endif endif +# We need many plugins for their Cuttlefish schemas. +TEST_DEPS += rabbitmq_amqp1_0 \ + rabbitmq_auth_backend_amqp \ + rabbitmq_auth_backend_http \ + rabbitmq_auth_backend_ldap \ + rabbitmq_clusterer \ + rabbitmq_management \ + rabbitmq_metronome \ + rabbitmq_mqtt \ + rabbitmq_stomp \ + rabbitmq_web_mqtt \ + rabbitmq_web_stomp + # FIXME: Remove rabbitmq_test as TEST_DEPS from here for now. 
TEST_DEPS := amqp_client meck $(filter-out rabbitmq_test,$(TEST_DEPS)) From 0ee7b1580ed683a924434cf56250d250123b70a0 Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Wed, 25 May 2016 07:46:34 +0100 Subject: [PATCH 119/174] Take update mirroring decision in rabbit_amqqueue_process --- src/rabbit_amqqueue_process.erl | 69 ++++++++++++++++++++++++-------- src/rabbit_mirror_queue_misc.erl | 17 ++++---- 2 files changed, 61 insertions(+), 25 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 3ee14e4f7db6..bf7484557d0f 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -54,6 +54,7 @@ max_length, max_bytes, args_policy_version, + mirroring_policy_version = 0, status }). @@ -1207,22 +1208,15 @@ handle_cast({set_maximum_since_use, Age}, State) -> ok = file_handle_cache:set_maximum_since_use(Age), noreply(State); -handle_cast(start_mirroring, State = #q{backing_queue = BQ, - backing_queue_state = BQS}) -> - %% lookup again to get policy for init_with_existing_bq - {ok, Q} = rabbit_amqqueue:lookup(qname(State)), - true = BQ =/= rabbit_mirror_queue_master, %% assertion - BQ1 = rabbit_mirror_queue_master, - BQS1 = BQ1:init_with_existing_bq(Q, BQ, BQS), - noreply(State#q{backing_queue = BQ1, - backing_queue_state = BQS1}); - -handle_cast(stop_mirroring, State = #q{backing_queue = BQ, - backing_queue_state = BQS}) -> - BQ = rabbit_mirror_queue_master, %% assertion - {BQ1, BQS1} = BQ:stop_mirroring(BQS), - noreply(State#q{backing_queue = BQ1, - backing_queue_state = BQS1}); +handle_cast(update_mirroring, State = #q{q = Q, + mirroring_policy_version = Version}) -> + case needs_update_mirroring(Q, Version) of + false -> + noreply(State); + {Policy, NewVersion} -> + State1 = State#q{mirroring_policy_version = NewVersion}, + noreply(update_mirroring(Policy, State1)) + end; handle_cast({credit, ChPid, CTag, Credit, Drain}, State = #q{consumers = Consumers, @@ -1381,9 +1375,52 @@ log_auto_delete(Reason, 
#q{ q = #amqqueue{ name = Resource } }) -> Reason, [QName, VHost]). +needs_update_mirroring(_Q, _Version) -> + %% hook here GaS changes on the policy + true. + +update_mirroring(Policy, State = #q{backing_queue = BQ}) -> + case update_to(Policy, BQ) of + start_mirroring -> + start_mirroring(State); + stop_mirroring -> + stop_mirroring(State); + ignore -> + State; + update_ha_mode -> + update_ha_mode(State) + end. +update_to(undefined, rabbit_mirror_queue_master) -> + stop_mirroring; +update_to(_, rabbit_mirror_queue_master) -> + update_ha_mode; +update_to(undefined, BQ) when BQ =/= rabbit_mirror_queue_master -> + ignore; +update_to(_, BQ) when BQ =/= rabbit_mirror_queue_master -> + start_mirroring. + +start_mirroring(State = #q{backing_queue = BQ, + backing_queue_state = BQS}) -> + %% lookup again to get policy for init_with_existing_bq + {ok, Q} = rabbit_amqqueue:lookup(qname(State)), + true = BQ =/= rabbit_mirror_queue_master, %% assertion + BQ1 = rabbit_mirror_queue_master, + BQS1 = BQ1:init_with_existing_bq(Q, BQ, BQS), + State#q{backing_queue = BQ1, + backing_queue_state = BQS1}. +stop_mirroring(State = #q{backing_queue = BQ, + backing_queue_state = BQS}) -> + BQ = rabbit_mirror_queue_master, %% assertion + {BQ1, BQS1} = BQ:stop_mirroring(BQS), + State#q{backing_queue = BQ1, + backing_queue_state = BQS1}. +update_ha_mode(State) -> + {ok, Q} = rabbit_amqqueue:lookup(qname(State)), + ok = rabbit_mirror_queue_misc:update_mirrors(Q), + State. 
diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl index 849efa361174..b188298a9bab 100644 --- a/src/rabbit_mirror_queue_misc.erl +++ b/src/rabbit_mirror_queue_misc.erl @@ -20,7 +20,7 @@ -export([remove_from_queue/3, on_node_up/0, add_mirrors/3, report_deaths/4, store_updated_slaves/1, initial_queue_node/2, suggested_queue_nodes/1, - is_mirrored/1, update_mirrors/2, validate_policy/1, + is_mirrored/1, update_mirrors/2, update_mirrors/1, validate_policy/1, maybe_auto_sync/1, maybe_drop_master_after_sync/1, sync_batch_size/1, log_info/3, log_warning/3]). @@ -64,6 +64,8 @@ -spec(is_mirrored/1 :: (rabbit_types:amqqueue()) -> boolean()). -spec(update_mirrors/2 :: (rabbit_types:amqqueue(), rabbit_types:amqqueue()) -> 'ok'). +-spec(update_mirrors/1 :: + (rabbit_types:amqqueue()) -> 'ok'). -spec(maybe_drop_master_after_sync/1 :: (rabbit_types:amqqueue()) -> 'ok'). -spec(maybe_auto_sync/1 :: (rabbit_types:amqqueue()) -> 'ok'). -spec(log_info/3 :: (rabbit_amqqueue:name(), string(), [any()]) -> 'ok'). @@ -384,15 +386,12 @@ update_mirrors(OldQ = #amqqueue{pid = QPid}, NewQ = #amqqueue{pid = QPid}) -> case {is_mirrored(OldQ), is_mirrored(NewQ)} of {false, false} -> ok; - {true, false} -> rabbit_amqqueue:stop_mirroring(QPid); - {false, true} -> rabbit_amqqueue:start_mirroring(QPid); - {true, true} -> update_mirrors0(OldQ, NewQ) + _ -> rabbit_amqqueue:update_mirroring(QPid) end. 
-update_mirrors0(OldQ = #amqqueue{name = QName}, - NewQ = #amqqueue{name = QName}) -> - {OldMNode, OldSNodes, _} = actual_queue_nodes(OldQ), - {NewMNode, NewSNodes} = suggested_queue_nodes(NewQ), +update_mirrors(Q = #amqqueue{name = QName}) -> + {OldMNode, OldSNodes, _} = actual_queue_nodes(Q), + {NewMNode, NewSNodes} = suggested_queue_nodes(Q), OldNodes = [OldMNode | OldSNodes], NewNodes = [NewMNode | NewSNodes], %% When a mirror dies, remove_from_queue/2 might have to add new @@ -406,7 +405,7 @@ update_mirrors0(OldQ = #amqqueue{name = QName}, drop_mirrors(QName, OldNodes -- NewNodes), %% This is for the case where no extra nodes were added but we changed to %% a policy requiring auto-sync. - maybe_auto_sync(NewQ), + maybe_auto_sync(Q), ok. %% The arrival of a newly synced slave may cause the master to die if From 217488728781c13cda413b6d0308991ba752633d Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Fri, 27 May 2016 14:44:16 +0300 Subject: [PATCH 120/174] Include rabbitmq_trust_store into RabbitMQ distribution [Finishes ##120401213] --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index d8761c022ffc..c3c0df5c77c6 100644 --- a/Makefile +++ b/Makefile @@ -53,6 +53,7 @@ DISTRIBUTED_DEPS := rabbitmq_amqp1_0 \ rabbitmq_shovel_management \ rabbitmq_stomp \ rabbitmq_tracing \ + rabbitmq_trust_store \ rabbitmq_web_dispatch \ rabbitmq_web_stomp \ rabbitmq_web_stomp_examples From 66e6a917f728de933a146cae08590d82434136dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Fri, 27 May 2016 16:55:44 +0200 Subject: [PATCH 121/174] Lager extra sinks are renamed to `rabbit_log_*` --- src/rabbit_amqqueue_process.erl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 3ee14e4f7db6..e6eab47bbce5 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -1371,15 +1371,15 @@ 
log_delete_exclusive({ConPid, _ConRef}, State) -> log_delete_exclusive(ConPid, State); log_delete_exclusive(ConPid, #q{ q = #amqqueue{ name = Resource } }) -> #resource{ name = QName, virtual_host = VHost } = Resource, - rabbit_queue:debug("Deleting exclusive queue '~s' in vhost '~s' " ++ - " because its declaring connection ~p was closed", - [QName, VHost, ConPid]). + rabbit_log_queue:debug("Deleting exclusive queue '~s' in vhost '~s' " ++ + "because its declaring connection ~p was closed", + [QName, VHost, ConPid]). log_auto_delete(Reason, #q{ q = #amqqueue{ name = Resource } }) -> #resource{ name = QName, virtual_host = VHost } = Resource, - rabbit_queue:debug("Deleting auto-delete queue '~s' in vhost '~s' " ++ - Reason, - [QName, VHost]). + rabbit_log_queue:debug("Deleting auto-delete queue '~s' in vhost '~s' " ++ + Reason, + [QName, VHost]). From 38be6a77d2f8322801a61e2222c22fc665990cf0 Mon Sep 17 00:00:00 2001 From: Gabriele Santomaggio Date: Fri, 27 May 2016 11:58:32 +0200 Subject: [PATCH 122/174] Update policy version on set_policy and use it on update_mirroring --- src/rabbit_amqqueue_process.erl | 17 +++++++++++------ src/rabbit_policy.erl | 4 +++- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index bf7484557d0f..59c9d7826efa 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -703,7 +703,7 @@ handle_ch_down(DownPid, State = #q{consumers = Consumers, exclusive_consumer = Holder1}, notify_decorators(State2), case should_auto_delete(State2) of - true -> + true -> log_auto_delete( io_lib:format( "because all of its consumers (~p) were on a channel that was closed", @@ -1072,11 +1072,11 @@ handle_call({basic_cancel, ChPid, ConsumerTag, OkMsg}, _From, notify_decorators(State1), case should_auto_delete(State1) of false -> reply(ok, ensure_expiry_timer(State1)); - true -> + true -> log_auto_delete( io_lib:format( "because its last consumer with 
tag '~s' was cancelled", - [ConsumerTag]), + [ConsumerTag]), State), stop(ok, State1) end @@ -1375,9 +1375,14 @@ log_auto_delete(Reason, #q{ q = #amqqueue{ name = Resource } }) -> Reason, [QName, VHost]). -needs_update_mirroring(_Q, _Version) -> - %% hook here GaS changes on the policy - true. +needs_update_mirroring(Q, Version) -> + {ok, UpQ} = rabbit_amqqueue:lookup(Q#amqqueue.name), + DBVersion = UpQ#amqqueue.policy_version, + case DBVersion > Version of + true -> {rabbit_policy:get(<<"ha-mode">>, UpQ), DBVersion}; + false -> false + end. + update_mirroring(Policy, State = #q{backing_queue = BQ}) -> case update_to(Policy, BQ) of diff --git a/src/rabbit_policy.erl b/src/rabbit_policy.erl index eb8cf6332737..a9caadf97289 100644 --- a/src/rabbit_policy.erl +++ b/src/rabbit_policy.erl @@ -276,7 +276,9 @@ update_queue(Q = #amqqueue{name = QName, policy = OldPolicy}, Policies) -> NewPolicy -> case rabbit_amqqueue:update( QName, fun(Q1) -> rabbit_queue_decorator:set( - Q1#amqqueue{policy = NewPolicy}) + Q1#amqqueue{policy = NewPolicy, + policy_version = + Q1#amqqueue.policy_version + 1 }) end) of #amqqueue{} = Q1 -> {Q, Q1}; not_found -> {Q, Q } From a36787f6c68db4674bc7651547ab8c18b40cc5a9 Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Fri, 27 May 2016 14:04:56 +0100 Subject: [PATCH 123/174] Test concurrent application of HA policies --- Makefile | 2 +- test/dynamic_ha_SUITE.erl | 111 +++++++++++++++++++++++++++++++++++++- 2 files changed, 111 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index e75263ca7087..3d2a5bb88e44 100644 --- a/Makefile +++ b/Makefile @@ -81,7 +81,7 @@ TEST_DEPS += rabbitmq_amqp1_0 \ rabbitmq_web_stomp # FIXME: Remove rabbitmq_test as TEST_DEPS from here for now. 
-TEST_DEPS := amqp_client meck $(filter-out rabbitmq_test,$(TEST_DEPS)) +TEST_DEPS := amqp_client meck proper $(filter-out rabbitmq_test,$(TEST_DEPS)) include erlang.mk diff --git a/test/dynamic_ha_SUITE.erl b/test/dynamic_ha_SUITE.erl index 5872d97d4cbd..8abfa686266d 100644 --- a/test/dynamic_ha_SUITE.erl +++ b/test/dynamic_ha_SUITE.erl @@ -31,6 +31,7 @@ %% The first two are change_policy, the last two are change_cluster -include_lib("common_test/include/ct.hrl"). +-include_lib("proper/include/proper.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). @@ -60,7 +61,8 @@ groups() -> ]}, {cluster_size_3, [], [ change_policy, - rapid_change + rapid_change, + random_policy ]} ]} ]. @@ -253,6 +255,9 @@ promote_on_shutdown(Config) -> durable = true}), ok. +random_policy(Config) -> + run_proper(fun prop_random_policy/1, [Config]). + %%---------------------------------------------------------------------------- assert_slaves(RPCNode, QName, Exp) -> @@ -327,3 +332,107 @@ get_stacktrace() -> _:e -> erlang:get_stacktrace() end. + +%%---------------------------------------------------------------------------- +run_proper(Fun, Args) -> + case proper:counterexample(erlang:apply(Fun, Args), + [{numtests, 25}, + {on_output, fun(F, A) -> + io:format(user, F, A) + end}]) of + true -> + true; + Value -> + exit(Value) + end. 
+ +prop_random_policy(Config) -> + [NodeA, _, _] = Nodes = rabbit_ct_broker_helpers:get_node_configs( + Config, nodename), + ?FORALL( + Policies, non_empty(list(policy_gen(Nodes))), + begin + Ch = rabbit_ct_client_helpers:open_channel(Config, NodeA), + amqp_channel:call(Ch, #'queue.declare'{queue = ?QNAME}), + %% Add some load so mirrors can be busy synchronising + rabbit_ct_client_helpers:publish(Ch, ?QNAME, 100000), + %% Apply policies in parallel on all nodes + apply_in_parallel(Config, Nodes, Policies), + %% The last policy is the final state + Last = lists:last(Policies), + %% Give it some time to generate all internal notifications + timer:sleep(2000), + %% Ensure the owner/master is able to process a call request, + %% which means that all pending casts have been processed. + %% Use the information returned by owner/master to verify the + %% test result + Info = find_queue(?QNAME, NodeA), + %% Gets owner/master + Pid = proplists:get_value(pid, Info), + FinalInfo = rpc:call(node(Pid), gen_server, call, [Pid, info], 5000), + %% Check the result + Result = verify_policy(Last, FinalInfo), + %% Cleanup + amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}), + (catch rabbit_ct_broker_helpers:clear_policy(Config, NodeA, ?POLICY)), + Result + end). + +apply_in_parallel(Config, Nodes, Policies) -> + Self = self(), + [spawn_link(fun() -> + [begin + apply_policy(Config, N, Policy) + end || Policy <- Policies], + Self ! parallel_task_done + end) || N <- Nodes], + [receive + parallel_task_done -> + ok + end || _ <- Nodes]. + +%% Proper generators +policy_gen(Nodes) -> + %% Stop mirroring needs to be called often to trigger rabbitmq-server#803 + frequency([{3, undefined}, + {1, all}, + {1, {nodes, nodes_gen(Nodes)}}, + {1, {exactly, choose(1, 3)}} + ]). + +nodes_gen(Nodes) -> + ?LET(List, non_empty(list(oneof(Nodes))), + sets:to_list(sets:from_list(List))). 
+ +%% Checks +verify_policy(undefined, Info) -> + %% If the queue is not mirrored, it returns '' + '' == proplists:get_value(slave_pids, Info); +verify_policy(all, Info) -> + 2 == length(proplists:get_value(slave_pids, Info)); +verify_policy({exactly, 1}, Info) -> + %% If the queue is mirrored, it returns a list + [] == proplists:get_value(slave_pids, Info); +verify_policy({exactly, N}, Info) -> + (N - 1) == length(proplists:get_value(slave_pids, Info)); +verify_policy({nodes, Nodes}, Info) -> + Master = node(proplists:get_value(pid, Info)), + Slaves = [node(P) || P <- proplists:get_value(slave_pids, Info)], + lists:sort(Nodes) == lists:sort([Master | Slaves]). + +%% Policies +apply_policy(Config, N, undefined) -> + (catch rabbit_ct_broker_helpers:clear_policy(Config, N, ?POLICY)); +apply_policy(Config, N, all) -> + rabbit_ct_broker_helpers:set_ha_policy( + Config, N, ?POLICY, <<"all">>, + [{<<"ha-sync-mode">>, <<"automatic">>}]); +apply_policy(Config, N, {nodes, Nodes}) -> + NNodes = [rabbit_misc:atom_to_binary(Node) || Node <- Nodes], + rabbit_ct_broker_helpers:set_ha_policy( + Config, N, ?POLICY, {<<"nodes">>, NNodes}, + [{<<"ha-sync-mode">>, <<"automatic">>}]); +apply_policy(Config, N, {exactly, Exactly}) -> + rabbit_ct_broker_helpers:set_ha_policy( + Config, N, ?POLICY, {<<"exactly">>, Exactly}, + [{<<"ha-sync-mode">>, <<"automatic">>}]). 
From c00bd9f513a97538f428637843db68848bdef063 Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Fri, 27 May 2016 15:07:19 +0100 Subject: [PATCH 124/174] Check return value of clear_policy in the test instead of auxiliary function --- test/dynamic_ha_SUITE.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/dynamic_ha_SUITE.erl b/test/dynamic_ha_SUITE.erl index 8abfa686266d..c54e4c2994ab 100644 --- a/test/dynamic_ha_SUITE.erl +++ b/test/dynamic_ha_SUITE.erl @@ -139,7 +139,7 @@ change_policy(Config) -> assert_slaves(A, ?QNAME, {A, [C]}, [{A, [B, C]}]), %% Clear the policy, and we go back to non-mirrored - rabbit_ct_broker_helpers:clear_policy(Config, A, ?POLICY), + ok = rabbit_ct_broker_helpers:clear_policy(Config, A, ?POLICY), assert_slaves(A, ?QNAME, {A, ''}), %% Test switching "away" from an unmirrored node @@ -208,7 +208,7 @@ rapid_loop(Config, Node, MRef) -> after 0 -> rabbit_ct_broker_helpers:set_ha_policy(Config, Node, ?POLICY, <<"all">>), - rabbit_ct_broker_helpers:clear_policy(Config, Node, ?POLICY), + ok = rabbit_ct_broker_helpers:clear_policy(Config, Node, ?POLICY), rapid_loop(Config, Node, MRef) end. @@ -374,7 +374,7 @@ prop_random_policy(Config) -> Result = verify_policy(Last, FinalInfo), %% Cleanup amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}), - (catch rabbit_ct_broker_helpers:clear_policy(Config, NodeA, ?POLICY)), + _ = rabbit_ct_broker_helpers:clear_policy(Config, NodeA, ?POLICY), Result end). 
@@ -422,7 +422,7 @@ verify_policy({nodes, Nodes}, Info) -> %% Policies apply_policy(Config, N, undefined) -> - (catch rabbit_ct_broker_helpers:clear_policy(Config, N, ?POLICY)); + _ = rabbit_ct_broker_helpers:clear_policy(Config, N, ?POLICY); apply_policy(Config, N, all) -> rabbit_ct_broker_helpers:set_ha_policy( Config, N, ?POLICY, <<"all">>, From f14ae2212e649d3d1017e0aa3a3ae93b1778cfa8 Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Fri, 27 May 2016 16:05:13 +0100 Subject: [PATCH 125/174] Upgrade amqqueue record with policy_version --- src/rabbit_upgrade_functions.erl | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index b99a1d12ee27..0f55b9e4a961 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -52,6 +52,7 @@ -rabbit_upgrade({down_slave_nodes, mnesia, [queue_decorators]}). -rabbit_upgrade({queue_state, mnesia, [down_slave_nodes]}). -rabbit_upgrade({recoverable_slaves, mnesia, [queue_state]}). +-rabbit_upgrade({policy_version, mnesia, [recoverable_slaves]}). -rabbit_upgrade({user_password_hashing, mnesia, [hash_passwords]}). %% ------------------------------------------------------------------- @@ -447,6 +448,24 @@ recoverable_slaves(Table) -> sync_slave_pids, recoverable_slaves, policy, gm_pids, decorators, state]). +policy_version() -> + ok = policy_version(rabbit_queue), + ok = policy_version(rabbit_durable_queue). 
+ +policy_version(Table) -> + transform( + Table, + fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments, + Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators, + State}) -> + {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments, + Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators, + State, 0} + end, + [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids, + sync_slave_pids, recoverable_slaves, policy, gm_pids, decorators, state, + policy_version]). + %% Prior to 3.6.0, passwords were hashed using MD5, this populates %% existing records with said default. Users created with 3.6.0+ will %% have internal_user.hashing_algorithm populated by the internal From 58d58398b46d37e28f37365cfbffedc638fbd092 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Sat, 28 May 2016 14:22:48 +0300 Subject: [PATCH 126/174] Update CONTRIBUTING.md, add CODE_OF_CONDUCT.md --- CODE_OF_CONDUCT.md | 44 ++++++++++++++++++++++++++++++++++++++++++++ CONTRIBUTING.md | 17 ++--------------- 2 files changed, 46 insertions(+), 15 deletions(-) create mode 100644 CODE_OF_CONDUCT.md diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..1f6ef1c576e7 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,44 @@ +# Contributor Code of Conduct + +As contributors and maintainers of this project, and in the interest of fostering an open +and welcoming community, we pledge to respect all people who contribute through reporting +issues, posting feature requests, updating documentation, submitting pull requests or +patches, and other activities. + +We are committed to making participation in this project a harassment-free experience for +everyone, regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, body size, race, ethnicity, age, +religion, or nationality. 
+ +Examples of unacceptable behavior by participants include: + + * The use of sexualized language or imagery + * Personal attacks + * Trolling or insulting/derogatory comments + * Public or private harassment + * Publishing other's private information, such as physical or electronic addresses, + without explicit permission + * Other unethical or unprofessional conduct + +Project maintainers have the right and responsibility to remove, edit, or reject comments, +commits, code, wiki edits, issues, and other contributions that are not aligned to this +Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors +that they deem inappropriate, threatening, offensive, or harmful. + +By adopting this Code of Conduct, project maintainers commit themselves to fairly and +consistently applying these principles to every aspect of managing this project. Project +maintainers who do not follow or enforce the Code of Conduct may be permanently removed +from the project team. + +This Code of Conduct applies both within project spaces and in public spaces when an +individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by +contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will +be reviewed and investigated and will result in a response that is deemed necessary and +appropriate to the circumstances. Maintainers are obligated to maintain confidentiality +with regard to the reporter of an incident. 
+ +This Code of Conduct is adapted from the +[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at +[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 69a4b4a437fd..45bbcbe62e74 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users). -## (Brief) Code of Conduct +## Code of Conduct -In one line: don't be a dick. - -Be respectful to the maintainers and other contributors. Open source -contributors put long hours into developing projects and doing user -support. Those projects and user support are available for free. We -believe this deserves some respect. - -Be respectful to people of all races, genders, religious beliefs and -political views. Regardless of how brilliant a pull request is -technically, we will not tolerate disrespectful or aggressive -behaviour. - -Contributors who violate this straightforward Code of Conduct will see -their pull requests closed and locked. +See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md). 
## Contributor Agreement From f32ae7e94f1e6c564490628399ffab0a4aef0d16 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Sat, 28 May 2016 14:22:48 +0300 Subject: [PATCH 127/174] Update CONTRIBUTING.md, add CODE_OF_CONDUCT.md --- CODE_OF_CONDUCT.md | 44 ++++++++++++++++++++++++++++++++++++++++++++ CONTRIBUTING.md | 17 ++--------------- 2 files changed, 46 insertions(+), 15 deletions(-) create mode 100644 CODE_OF_CONDUCT.md diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..1f6ef1c576e7 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,44 @@ +# Contributor Code of Conduct + +As contributors and maintainers of this project, and in the interest of fostering an open +and welcoming community, we pledge to respect all people who contribute through reporting +issues, posting feature requests, updating documentation, submitting pull requests or +patches, and other activities. + +We are committed to making participation in this project a harassment-free experience for +everyone, regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, body size, race, ethnicity, age, +religion, or nationality. + +Examples of unacceptable behavior by participants include: + + * The use of sexualized language or imagery + * Personal attacks + * Trolling or insulting/derogatory comments + * Public or private harassment + * Publishing other's private information, such as physical or electronic addresses, + without explicit permission + * Other unethical or unprofessional conduct + +Project maintainers have the right and responsibility to remove, edit, or reject comments, +commits, code, wiki edits, issues, and other contributions that are not aligned to this +Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors +that they deem inappropriate, threatening, offensive, or harmful. 
+ +By adopting this Code of Conduct, project maintainers commit themselves to fairly and +consistently applying these principles to every aspect of managing this project. Project +maintainers who do not follow or enforce the Code of Conduct may be permanently removed +from the project team. + +This Code of Conduct applies both within project spaces and in public spaces when an +individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by +contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will +be reviewed and investigated and will result in a response that is deemed necessary and +appropriate to the circumstances. Maintainers are obligated to maintain confidentiality +with regard to the reporter of an incident. + +This Code of Conduct is adapted from the +[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at +[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 69a4b4a437fd..45bbcbe62e74 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users). -## (Brief) Code of Conduct +## Code of Conduct -In one line: don't be a dick. - -Be respectful to the maintainers and other contributors. Open source -contributors put long hours into developing projects and doing user -support. Those projects and user support are available for free. We -believe this deserves some respect. - -Be respectful to people of all races, genders, religious beliefs and -political views. Regardless of how brilliant a pull request is -technically, we will not tolerate disrespectful or aggressive -behaviour. 
- -Contributors who violate this straightforward Code of Conduct will see -their pull requests closed and locked. +See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md). ## Contributor Agreement From 22ab163ddbdb254ea384c27bde3814650498c61e Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Sun, 29 May 2016 23:21:53 +0300 Subject: [PATCH 128/174] Update rabbitmq-components.mk --- rabbitmq-components.mk | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/rabbitmq-components.mk b/rabbitmq-components.mk index 920a67b1210a..4d610020521a 100644 --- a/rabbitmq-components.mk +++ b/rabbitmq-components.mk @@ -54,7 +54,9 @@ dep_rabbitmq_management_visualiser = git_rmq rabbitmq-management-visualiser $ dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master +dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master +dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master @@ -111,7 +113,9 @@ RABBITMQ_COMPONENTS = amqp_client \ rabbitmq_message_timestamp \ rabbitmq_metronome \ rabbitmq_mqtt \ + rabbitmq_objc_client \ rabbitmq_recent_history_exchange \ + rabbitmq_routing_node_stamp \ rabbitmq_rtopic_exchange \ rabbitmq_sharding \ rabbitmq_shovel \ From ec5a71d79ac5e8e200ce51827a3fb2c4b8887de1 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Mon, 30 May 2016 18:04:27 +0200 Subject: [PATCH 129/174] config_schema_SUITE: Copy Cuttlefish schema in the testsuite data directory We must do that instead of adding the plugins as test dependencies because doing this introduces a circular dependency: all plugins depend on rabbitmq-server. With the circular dependency in place, the broker was compiled with $(ERLC_OPTS) compiler flags, because the build was triggered by the first plugin. Therefore we missed all the $(TEST_ERLC_OPTS) additions. With the circular dependency fixed, the broker is again built with $(TEST_ERLC_OPTS): `src/truncate.erl` depends on the `-DTEST=1` flag. In the longer term, we must move the testcases depending on the plugins' schemas to their corresponding plugin. --- Makefile | 13 - test/config_schema_SUITE.erl | 12 +- .../schema/rabbitmq.schema | 961 ++++++++++++++++++ .../schema/rabbitmq_amqp1_0.schema | 31 + .../schema/rabbitmq_auth_backend_amqp.schema | 27 + .../schema/rabbitmq_auth_backend_http.schema | 15 + .../schema/rabbitmq_auth_backend_ldap.schema | 183 ++++ .../schema/rabbitmq_clusterer.schema | 58 ++ .../schema/rabbitmq_management.schema | 203 ++++ .../schema/rabbitmq_metronome.schema | 9 + .../schema/rabbitmq_mqtt.schema | 235 +++++ .../schema/rabbitmq_stomp.schema | 110 ++ .../schema/rabbitmq_web_mqtt.schema | 44 + .../schema/rabbitmq_web_stomp.schema | 64 ++ 14 files changed, 1941 insertions(+), 24 deletions(-) create mode 100644 test/config_schema_SUITE_data/schema/rabbitmq.schema create mode 100644 test/config_schema_SUITE_data/schema/rabbitmq_amqp1_0.schema create mode 100644 test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_amqp.schema create mode 100644 test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_http.schema create mode 100644 test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_ldap.schema create mode 100644 test/config_schema_SUITE_data/schema/rabbitmq_clusterer.schema create mode 100644 
test/config_schema_SUITE_data/schema/rabbitmq_management.schema create mode 100644 test/config_schema_SUITE_data/schema/rabbitmq_metronome.schema create mode 100644 test/config_schema_SUITE_data/schema/rabbitmq_mqtt.schema create mode 100644 test/config_schema_SUITE_data/schema/rabbitmq_stomp.schema create mode 100644 test/config_schema_SUITE_data/schema/rabbitmq_web_mqtt.schema create mode 100644 test/config_schema_SUITE_data/schema/rabbitmq_web_stomp.schema diff --git a/Makefile b/Makefile index c3e420e2aa7b..5d35ab18f4a6 100644 --- a/Makefile +++ b/Makefile @@ -68,19 +68,6 @@ DEPS += $(DISTRIBUTED_DEPS) endif endif -# We need many plugins for their Cuttlefish schemas. -TEST_DEPS += rabbitmq_amqp1_0 \ - rabbitmq_auth_backend_amqp \ - rabbitmq_auth_backend_http \ - rabbitmq_auth_backend_ldap \ - rabbitmq_clusterer \ - rabbitmq_management \ - rabbitmq_metronome \ - rabbitmq_mqtt \ - rabbitmq_stomp \ - rabbitmq_web_mqtt \ - rabbitmq_web_stomp - # FIXME: Remove rabbitmq_test as TEST_DEPS from here for now. TEST_DEPS := amqp_client meck proper $(filter-out rabbitmq_test,$(TEST_DEPS)) diff --git a/test/config_schema_SUITE.erl b/test/config_schema_SUITE.erl index b1362aac287f..79e7220e980f 100644 --- a/test/config_schema_SUITE.erl +++ b/test/config_schema_SUITE.erl @@ -57,11 +57,10 @@ init_per_testcase(Testcase, Config) -> ]), Config2 = case Testcase of run_snippets -> - SchemaDir = filename:join(?config(priv_dir, Config1), "schema"), + SchemaDir = filename:join(?config(data_dir, Config1), "schema"), ResultsDir = filename:join(?config(priv_dir, Config1), "results"), Snippets = filename:join(?config(data_dir, Config1), "snippets.config"), - ok = file:make_dir(SchemaDir), ok = file:make_dir(ResultsDir), rabbit_ct_helpers:set_config(Config1, [ {schema_dir, SchemaDir}, @@ -88,7 +87,6 @@ run_snippets(Config) -> ?MODULE, run_snippets1, [Config]). 
run_snippets1(Config) -> - prepare_plugin_schemas(Config), {ok, [Snippets]} = file:consult(?config(conf_snippets, Config)), lists:map( fun({N, S, C, P}) -> ok = test_snippet(Config, {integer_to_list(N), S, []}, C, P); @@ -128,14 +126,6 @@ generate_config(Config, ConfFile, AdvancedFile) -> rabbit_config:generate_config_file([ConfFile], ResultsDir, ScriptDir, SchemaDir, AdvancedFile). -prepare_plugin_schemas(Config) -> - SchemaDir = ?config(schema_dir, Config), - DepsDir = ?config(erlang_mk_depsdir, Config), - Files = filelib:wildcard( - filename:join(DepsDir, "*/priv/schema/*.schema")), - [ file:copy(File, filename:join([SchemaDir, filename:basename(File)])) - || File <- Files ]. - deepsort(List) -> case is_proplist(List) of true -> diff --git a/test/config_schema_SUITE_data/schema/rabbitmq.schema b/test/config_schema_SUITE_data/schema/rabbitmq.schema new file mode 100644 index 000000000000..19040da409d3 --- /dev/null +++ b/test/config_schema_SUITE_data/schema/rabbitmq.schema @@ -0,0 +1,961 @@ +% ============================== +% Rabbit app section +% ============================== + +%% +%% Network Connectivity +%% ==================== +%% + +%% By default, RabbitMQ will listen on all interfaces, using +%% the standard (reserved) AMQP port. +%% +%% {tcp_listeners, [5672]}, +%% To listen on a specific interface, provide a tuple of {IpAddress, Port}. +%% For example, to listen only on localhost for both IPv4 and IPv6: +%% +%% {tcp_listeners, [{"127.0.0.1", 5672}, +%% {"[::1]", 5672}]}, + +{mapping, "listeners.tcp", "rabbit.tcp_listeners",[ + {datatype, {enum, [none]}} +]}. + +{mapping, "listeners.tcp.$name", "rabbit.tcp_listeners",[ + {datatype, [integer, ip]} +]}. + +{translation, "rabbit.tcp_listeners", +fun(Conf) -> + case cuttlefish:conf_get("listeners.tcp", Conf, undefined) of + none -> []; + _ -> + Settings = cuttlefish_variable:filter_by_prefix("listeners.tcp", Conf), + [ V || {_, V} <- Settings ] + end +end}. 
+ +%% SSL listeners are configured in the same fashion as TCP listeners, +%% including the option to control the choice of interface. +%% +%% {ssl_listeners, [5671]}, + +{mapping, "listeners.ssl", "rabbit.ssl_listeners",[ + {datatype, {enum, [none]}} +]}. + +{mapping, "listeners.ssl.$name", "rabbit.ssl_listeners",[ + {datatype, [integer, ip]} +]}. + +{translation, "rabbit.ssl_listeners", +fun(Conf) -> + case cuttlefish:conf_get("listeners.ssl", Conf, undefined) of + none -> []; + _ -> + Settings = cuttlefish_variable:filter_by_prefix("listeners.ssl", Conf), + [ V || {_, V} <- Settings ] + end +end}. + +%% Number of Erlang processes that will accept connections for the TCP +%% and SSL listeners. +%% +%% {num_tcp_acceptors, 10}, +%% {num_ssl_acceptors, 1}, + +{mapping, "num_acceptors.ssl", "rabbit.num_ssl_acceptors", [ + {datatype, integer} +]}. + +{mapping, "num_acceptors.tcp", "rabbit.num_tcp_acceptors", [ + {datatype, integer} +]}. + + +%% Maximum time for AMQP 0-8/0-9/0-9-1 handshake (after socket connection +%% and SSL handshake), in milliseconds. +%% +%% {handshake_timeout, 10000}, + +{mapping, "handshake_timeout", "rabbit.handshake_timeout", [ + {datatype, integer} +]}. + +%% Set to 'true' to perform reverse DNS lookups when accepting a +%% connection. Hostnames will then be shown instead of IP addresses +%% in rabbitmqctl and the management plugin. +%% +%% {reverse_dns_lookups, true}, + +{mapping, "reverse_dns_lookups", "rabbit.reverse_dns_lookups", [ + {datatype, {enum, [true, false]}} +]}. + +{mapping, "erlang.K", "vm_args.+K", [ + {default, "true"}, + {level, advanced} +]}. + +%% +%% Security / AAA +%% ============== +%% + +%% The default "guest" user is only permitted to access the server +%% via a loopback interface (e.g. localhost). +%% {loopback_users, [<<"guest">>]}, +%% +%% Uncomment the following line if you want to allow access to the +%% guest user from anywhere on the network. 
+%% {loopback_users, []}, + +{mapping, "loopback_users", "rabbit.loopback_users", [ + {datatype, {enum, [none]}} +]}. + +{mapping, "loopback_users.$user", "rabbit.loopback_users", [ + {datatype, atom} +]}. + +{translation, "rabbit.loopback_users", +fun(Conf) -> + None = cuttlefish:conf_get("loopback_users", Conf, undefined), + case None of + none -> []; + _ -> + Settings = cuttlefish_variable:filter_by_prefix("loopback_users", Conf), + [ list_to_binary(U) || {["loopback_users", U], V} <- Settings, V == true ] + end +end}. + +%% Configuring SSL. +%% See http://www.rabbitmq.com/ssl.html for full documentation. +%% +%% {ssl_options, [{cacertfile, "/path/to/testca/cacert.pem"}, +%% {certfile, "/path/to/server/cert.pem"}, +%% {keyfile, "/path/to/server/key.pem"}, +%% {verify, verify_peer}, +%% {fail_if_no_peer_cert, false}]}, + +%% SSL options section ======================================================== + +{mapping, "ssl_allow_poodle_attack", "rabbit.ssl_allow_poodle_attack", +[{datatype, {enum, [true, false]}}]}. + +{mapping, "ssl_options", "rabbit.ssl_options", [ + {datatype, {enum, [none]}} +]}. + +{translation, "rabbit.ssl_options", +fun(Conf) -> + case cuttlefish:conf_get("ssl_options", Conf, undefined) of + none -> []; + _ -> cuttlefish:invalid("Invalid ssl_options") + end +end}. + +{mapping, "ssl_options.verify", "rabbit.ssl_options.verify", [ + {datatype, {enum, [verify_peer, verify_none]}}]}. + +{mapping, "ssl_options.fail_if_no_peer_cert", "rabbit.ssl_options.fail_if_no_peer_cert", [ + {datatype, {enum, [true, false]}}]}. + +{mapping, "ssl_options.cacertfile", "rabbit.ssl_options.cacertfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. + +{mapping, "ssl_options.certfile", "rabbit.ssl_options.certfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. + +{mapping, "ssl_options.cacerts.$name", "rabbit.ssl_options.cacerts", + [{datatype, string}]}. 
+ +{translation, "rabbit.ssl_options.cacerts", +fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("ssl_options.cacerts", Conf), + [ list_to_binary(V) || {_, V} <- Settings ] +end}. + +{mapping, "ssl_options.cert", "rabbit.ssl_options.cert", + [{datatype, string}]}. + +{translation, "rabbit.ssl_options.cert", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("ssl_options.cert", Conf)) +end}. + +{mapping, "ssl_options.client_renegotiation", "rabbit.ssl_options.client_renegotiation", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "ssl_options.crl_check", "rabbit.ssl_options.crl_check", + [{datatype, [{enum, [true, false, peer, best_effort]}]}]}. + +{mapping, "ssl_options.depth", "rabbit.ssl_options.depth", + [{datatype, integer}, {validators, ["byte"]}]}. + +{mapping, "ssl_options.dh", "rabbit.ssl_options.dh", + [{datatype, string}]}. + +{translation, "rabbit.ssl_options.dh", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("ssl_options.dh", Conf)) +end}. + +{mapping, "ssl_options.dhfile", "rabbit.ssl_options.dhfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. + +{mapping, "ssl_options.honor_cipher_order", "rabbit.ssl_options.honor_cipher_order", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "ssl_options.key.RSAPrivateKey", "rabbit.ssl_options.key", + [{datatype, string}]}. + +{mapping, "ssl_options.key.DSAPrivateKey", "rabbit.ssl_options.key", + [{datatype, string}]}. + +{mapping, "ssl_options.key.PrivateKeyInfo", "rabbit.ssl_options.key", + [{datatype, string}]}. + +{translation, "rabbit.ssl_options.key", +fun(Conf) -> + case cuttlefish_variable:filter_by_prefix("ssl_options.key", Conf) of + [{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)}; + _ -> undefined + end +end}. + +{mapping, "ssl_options.keyfile", "rabbit.ssl_options.keyfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. + +{mapping, "ssl_options.log_alert", "rabbit.ssl_options.log_alert", + [{datatype, {enum, [true, false]}}]}. 
+ +{mapping, "ssl_options.password", "rabbit.ssl_options.password", + [{datatype, string}]}. + +{mapping, "ssl_options.psk_identity", "rabbit.ssl_options.psk_identity", + [{datatype, string}]}. + +{mapping, "ssl_options.reuse_sessions", "rabbit.ssl_options.reuse_sessions", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "ssl_options.secure_renegotiate", "rabbit.ssl_options.secure_renegotiate", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "ssl_options.versions.$version", "rabbit.ssl_options.versions", + [{datatype, atom}]}. + +{translation, "rabbit.ssl_options.versions", +fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("ssl_options.versions", Conf), + [ V || {_, V} <- Settings ] +end}. + +%% =========================================================================== + +%% Choose the available SASL mechanism(s) to expose. +%% The two default (built in) mechanisms are 'PLAIN' and +%% 'AMQPLAIN'. Additional mechanisms can be added via +%% plugins. +%% +%% See http://www.rabbitmq.com/authentication.html for more details. +%% +%% {auth_mechanisms, ['PLAIN', 'AMQPLAIN']}, + +{mapping, "auth_mechanisms.$name", "rabbit.auth_mechanisms", [ + {datatype, atom}]}. + +{translation, "rabbit.auth_mechanisms", +fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("auth_mechanisms", Conf), + [ V || {_, V} <- Settings ] +end}. + + +%% Select an authentication backend to use. RabbitMQ provides an +%% internal backend in the core. 
+%% +%% {auth_backends, [rabbit_auth_backend_internal]}, + +{translation, "rabbit.auth_backends", +fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("auth_backends", Conf), + BackendModule = fun + (internal) -> rabbit_auth_backend_internal; + (ldap) -> rabbit_auth_backend_ldap; + (http) -> rabbit_auth_backend_http; + (amqp) -> rabbit_auth_backend_amqp; + (dummy) -> rabbit_auth_backend_dummy; + (Other) when is_atom(Other) -> Other; + (_) -> cuttlefish:invalid("Unknown/unsupported auth backend") + end, + AuthBackends = [{Num, {default, BackendModule(V)}} || {["auth_backends", Num], V} <- Settings], + AuthNBackends = [{Num, {authn, BackendModule(V)}} || {["auth_backends", Num, "authn"], V} <- Settings], + AuthZBackends = [{Num, {authz, BackendModule(V)}} || {["auth_backends", Num, "authz"], V} <- Settings], + Backends = lists:foldl( + fun({NumStr, {Type, V}}, Acc) -> + Num = case catch list_to_integer(NumStr) of + N when is_integer(N) -> N; + Err -> + cuttlefish:invalid( + iolist_to_binary(io_lib:format( + "Auth backend position in the chain should be an integer ~p", [Err]))) + end, + NewVal = case dict:find(Num, Acc) of + {ok, {AuthN, AuthZ}} -> + case {Type, AuthN, AuthZ} of + {authn, undefined, _} -> + {V, AuthZ}; + {authz, _, undefined} -> + {AuthN, V}; + _ -> + cuttlefish:invalid( + iolist_to_binary( + io_lib:format( + "Auth backend already defined for the ~pth ~p backend", + [Num, Type]))) + end; + error -> + case Type of + authn -> {V, undefined}; + authz -> {undefined, V}; + default -> {V, V} + end + end, + dict:store(Num, NewVal, Acc) + end, + dict:new(), + AuthBackends ++ AuthNBackends ++ AuthZBackends), + lists:map( + fun + ({Num, {undefined, AuthZ}}) -> + cuttlefish:warn( + io_lib:format( + "Auth backend undefined for the ~pth authz backend. Using ~p", + [Num, AuthZ])), + {AuthZ, AuthZ}; + ({Num, {AuthN, undefined}}) -> + cuttlefish:warn( + io_lib:format( + "Authz backend undefined for the ~pth authn backend. 
Using ~p", + [Num, AuthN])), + {AuthN, AuthN}; + ({_Num, {Auth, Auth}}) -> Auth; + ({_Num, {AuthN, AuthZ}}) -> {AuthN, AuthZ} + end, + lists:keysort(1, dict:to_list(Backends))) +end}. + +{mapping, "auth_backends.$num", "rabbit.auth_backends", [ + {datatype, atom} +]}. + +{mapping, "auth_backends.$num.authn", "rabbit.auth_backends",[ + {datatype, atom} +]}. + +{mapping, "auth_backends.$num.authz", "rabbit.auth_backends",[ + {datatype, atom} +]}. + +%% This pertains to both the rabbitmq_auth_mechanism_ssl plugin and +%% STOMP ssl_cert_login configurations. See the rabbitmq_stomp +%% configuration section later in this file and the README in +%% https://github.com/rabbitmq/rabbitmq-auth-mechanism-ssl for further +%% details. +%% +%% To use the SSL cert's CN instead of its DN as the username +%% +%% {ssl_cert_login_from, common_name}, + +{mapping, "ssl_cert_login_from", "rabbit.ssl_cert_login_from", [ + {datatype, {enum, [distinguished_name, common_name]}} +]}. + +%% SSL handshake timeout, in milliseconds. +%% +%% {ssl_handshake_timeout, 5000}, + +{mapping, "ssl_handshake_timeout", "rabbit.ssl_handshake_timeout", [ + {datatype, integer} +]}. + +%% Password hashing implementation. Will only affect newly +%% created users. To recalculate hash for an existing user +%% it's necessary to update her password. +%% +%% When importing definitions exported from versions earlier +%% than 3.6.0, it is possible to go back to MD5 (only do this +%% as a temporary measure!) by setting this to rabbit_password_hashing_md5. +%% +%% To use SHA-512, set to rabbit_password_hashing_sha512. +%% +%% {password_hashing_module, rabbit_password_hashing_sha256}, + +{mapping, "password_hashing_module", "rabbit.password_hashing_module", [ + {datatype, atom} +]}. + +%% +%% Default User / VHost +%% ==================== +%% + +%% On first start RabbitMQ will create a vhost and a user. These +%% config items control what gets created. 
See +%% http://www.rabbitmq.com/access-control.html for further +%% information about vhosts and access control. +%% +%% {default_vhost, <<"/">>}, +%% {default_user, <<"guest">>}, +%% {default_pass, <<"guest">>}, +%% {default_permissions, [<<".*">>, <<".*">>, <<".*">>]}, + +{mapping, "default_vhost", "rabbit.default_vhost", [ + {datatype, string} +]}. + +{translation, "rabbit.default_vhost", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("default_vhost", Conf)) +end}. + +{mapping, "default_user", "rabbit.default_user", [ + {datatype, string} +]}. + +{translation, "rabbit.default_user", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("default_user", Conf)) +end}. + +{mapping, "default_pass", "rabbit.default_pass", [ + {datatype, string} +]}. + +{translation, "rabbit.default_pass", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("default_pass", Conf)) +end}. + +{mapping, "default_permissions.configure", "rabbit.default_permissions", [ + {datatype, string} +]}. + +{mapping, "default_permissions.read", "rabbit.default_permissions", [ + {datatype, string} +]}. + +{mapping, "default_permissions.write", "rabbit.default_permissions", [ + {datatype, string} +]}. + +{translation, "rabbit.default_permissions", +fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("default_permissions", Conf), + Configure = proplists:get_value(["default_permissions", "configure"], Settings), + Read = proplists:get_value(["default_permissions", "read"], Settings), + Write = proplists:get_value(["default_permissions", "write"], Settings), + [list_to_binary(Configure), list_to_binary(Read), list_to_binary(Write)] +end}. + +%% Tags for default user +%% +%% For more details about tags, see the documentation for the +%% Management Plugin at http://www.rabbitmq.com/management.html. +%% +%% {default_user_tags, [administrator]}, + +{mapping, "default_user_tags.$tag", "rabbit.default_user_tags", + [{datatype, {enum, [true, false]}}]}. 
+ +{translation, "rabbit.default_user_tags", +fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("default_user_tags", Conf), + [ list_to_atom(Key) || {[_,Key], Val} <- Settings, Val == true ] +end}. + +%% +%% Additional network and protocol related configuration +%% ===================================================== +%% + +%% Set the default AMQP heartbeat delay (in seconds). +%% +%% {heartbeat, 600}, + +{mapping, "heartbeat", "rabbit.heartbeat", [{datatype, integer}]}. + +%% Set the max permissible size of an AMQP frame (in bytes). +%% +%% {frame_max, 131072}, + +{mapping, "frame_max", "rabbit.frame_max", [{datatype, bytesize}]}. + +%% Set the max frame size the server will accept before connection +%% tuning occurs +%% +%% {initial_frame_max, 4096}, + +{mapping, "initial_frame_max", "rabbit.initial_frame_max", [{datatype, bytesize}]}. + +%% Set the max permissible number of channels per connection. +%% 0 means "no limit". +%% +%% {channel_max, 128}, + +{mapping, "channel_max", "rabbit.channel_max", [{datatype, integer}]}. + +%% Customising Socket Options. +%% +%% See (http://www.erlang.org/doc/man/inet.html#setopts-2) for +%% further documentation. +%% +%% {tcp_listen_options, [{backlog, 128}, +%% {nodelay, true}, +%% {exit_on_close, false}]}, + +%% TCP listener section ====================================================== + +{mapping, "tcp_listen_options", "rabbit.tcp_listen_options", [ + {datatype, {enum, [none]}}]}. + +{translation, "rabbit.tcp_listen_options", +fun(Conf) -> + case cuttlefish:conf_get("tcp_listen_options", undefined) of + none -> []; + _ -> cuttlefish:invalid("Invalid tcp_listen_options") + end +end}. + +{mapping, "tcp_listen_options.backlog", "rabbit.tcp_listen_options.backlog", [ + {datatype, integer} +]}. + +{mapping, "tcp_listen_options.nodelay", "rabbit.tcp_listen_options.nodelay", [ + {datatype, {enum, [true, false]}} +]}. 
+ +{mapping, "tcp_listen_options.buffer", "rabbit.tcp_listen_options.buffer", + [{datatype, integer}]}. + +{mapping, "tcp_listen_options.delay_send", "rabbit.tcp_listen_options.delay_send", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "tcp_listen_options.dontroute", "rabbit.tcp_listen_options.dontroute", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "tcp_listen_options.exit_on_close", "rabbit.tcp_listen_options.exit_on_close", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "tcp_listen_options.fd", "rabbit.tcp_listen_options.fd", + [{datatype, integer}]}. + +{mapping, "tcp_listen_options.high_msgq_watermark", "rabbit.tcp_listen_options.high_msgq_watermark", + [{datatype, integer}]}. + +{mapping, "tcp_listen_options.high_watermark", "rabbit.tcp_listen_options.high_watermark", + [{datatype, integer}]}. + +{mapping, "tcp_listen_options.keepalive", "rabbit.tcp_listen_options.keepalive", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "tcp_listen_options.low_msgq_watermark", "rabbit.tcp_listen_options.low_msgq_watermark", + [{datatype, integer}]}. + +{mapping, "tcp_listen_options.low_watermark", "rabbit.tcp_listen_options.low_watermark", + [{datatype, integer}]}. + +{mapping, "tcp_listen_options.port", "rabbit.tcp_listen_options.port", + [{datatype, integer}, {validators, ["port"]}]}. + +{mapping, "tcp_listen_options.priority", "rabbit.tcp_listen_options.priority", + [{datatype, integer}]}. + +{mapping, "tcp_listen_options.recbuf", "rabbit.tcp_listen_options.recbuf", + [{datatype, integer}]}. + +{mapping, "tcp_listen_options.send_timeout", "rabbit.tcp_listen_options.send_timeout", + [{datatype, integer}]}. + +{mapping, "tcp_listen_options.send_timeout_close", "rabbit.tcp_listen_options.send_timeout_close", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "tcp_listen_options.sndbuf", "rabbit.tcp_listen_options.sndbuf", + [{datatype, integer}]}. + +{mapping, "tcp_listen_options.tos", "rabbit.tcp_listen_options.tos", + [{datatype, integer}]}. 
+ +%% ========================================================================== + +%% +%% Resource Limits & Flow Control +%% ============================== +%% +%% See http://www.rabbitmq.com/memory.html for full details. + +%% Memory-based Flow Control threshold. +%% +%% {vm_memory_high_watermark, 0.4}, + +%% Alternatively, we can set a limit (in bytes) of RAM used by the node. +%% +%% {vm_memory_high_watermark, {absolute, 1073741824}}, +%% +%% Or you can set absolute value using memory units (with RabbitMQ 3.6.0+). +%% +%% {vm_memory_high_watermark, {absolute, "1024M"}}, +%% +%% Supported units suffixes: +%% +%% kb, KB: kibibytes (2^10 bytes) +%% mb, MB: mebibytes (2^20) +%% gb, GB: gibibytes (2^30) + +{mapping, "vm_memory_high_watermark.relative", "rabbit.vm_memory_high_watermark", [ + {datatype, float}]}. + +{mapping, "vm_memory_high_watermark.absolute", "rabbit.vm_memory_high_watermark", [ + {datatype, [integer, string]}]}. + + +{translation, "rabbit.vm_memory_high_watermark", +fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("vm_memory_high_watermark", Conf), + Absolute = proplists:get_value(["vm_memory_high_watermark", "absolute"], Settings), + Relative = proplists:get_value(["vm_memory_high_watermark", "relative"], Settings), + case {Absolute, Relative} of + {undefined, undefined} -> cuttlefish:invalid("No vm watermark defined"); + {_, undefined} -> {absolute, Absolute}; + _ -> Relative + end +end}. + +%% Fraction of the high watermark limit at which queues start to +%% page message out to disc in order to free up memory. +%% +%% Values greater than 0.9 can be dangerous and should be used carefully. +%% +%% {vm_memory_high_watermark_paging_ratio, 0.5}, + +{mapping, "vm_memory_high_watermark_paging_ratio", + "rabbit.vm_memory_high_watermark_paging_ratio", + [{datatype, float}, {validators, ["less_than_1"]}]}. + +%% Interval (in milliseconds) at which we perform the check of the memory +%% levels against the watermarks. 
+%% +%% {memory_monitor_interval, 2500}, + +{mapping, "memory_monitor_interval", "rabbit.memory_monitor_interval", + [{datatype, integer}]}. + +%% Set disk free limit (in bytes). Once free disk space reaches this +%% lower bound, a disk alarm will be set - see the documentation +%% listed above for more details. +%% +%% {disk_free_limit, 50000000}, +%% +%% Or you can set it using memory units (same as in vm_memory_high_watermark) +%% with RabbitMQ 3.6.0+. +%% {disk_free_limit, "50MB"}, +%% {disk_free_limit, "50000kB"}, +%% {disk_free_limit, "2GB"}, + +%% Alternatively, we can set a limit relative to total available RAM. +%% +%% Values lower than 1.0 can be dangerous and should be used carefully. +%% {disk_free_limit, {mem_relative, 2.0}}, + +{mapping, "disk_free_limit.relative", "rabbit.disk_free_limit", [ + {datatype, float}]}. + +{mapping, "disk_free_limit.absolute", "rabbit.disk_free_limit", [ + {datatype, [integer, string]}]}. + + +{translation, "rabbit.disk_free_limit", +fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("disk_free_limit", Conf), + Absolute = proplists:get_value(["disk_free_limit", "absolute"], Settings), + Relative = proplists:get_value(["disk_free_limit", "relative"], Settings), + case {Absolute, Relative} of + {undefined, undefined} -> cuttlefish:invalid("No disk limit defined"); + {_, undefined} -> Absolute; + _ -> {mem_relative, Relative} + end +end}. + +%% +%% Clustering +%% ===================== +%% + +%% How to respond to cluster partitions. +%% See http://www.rabbitmq.com/partitions.html for further details. +%% +%% {cluster_partition_handling, ignore}, + +{mapping, "cluster_partition_handling", "rabbit.cluster_partition_handling", + [{datatype, {enum, [ignore, pause_minority, autoheal, pause_if_all_down]}}]}. + +{mapping, "cluster_partition_handling.pause_if_all_down.recover", + "rabbit.cluster_partition_handling", + [{datatype, {enum, [ignore, autoheal]}}]}. 
+ +{mapping, "cluster_partition_handling.pause_if_all_down.nodes.$name", + "rabbit.cluster_partition_handling", + [{datatype, atom}]}. + +{translation, "rabbit.cluster_partition_handling", +fun(Conf) -> + case cuttlefish:conf_get("cluster_partition_handling", Conf) of + pause_if_all_down -> + PauseIfAllDownNodes = cuttlefish_variable:filter_by_prefix( + "cluster_partition_handling.pause_if_all_down.nodes", + Conf), + case PauseIfAllDownNodes of + [] -> + cuttlefish:invalid("Nodes required for pause_if_all_down"); + _ -> + Nodes = [ V || {K,V} <- PauseIfAllDownNodes ], + PauseIfAllDownRecover = cuttlefish:conf_get( + "cluster_partition_handling.pause_if_all_down.recover", + Conf), + case PauseIfAllDownRecover of + Recover when Recover == ignore; Recover == autoheal -> + {pause_if_all_down, Nodes, Recover}; + Invalid -> + cuttlefish:invalid("Recover strategy required for pause_if_all_down") + end + end; + Other -> Other + end +end}. + +%% Mirror sync batch size, in messages. Increasing this will speed +%% up syncing but total batch size in bytes must not exceed 2 GiB. +%% Available in RabbitMQ 3.6.0 or later. +%% +%% {mirroring_sync_batch_size, 4096}, + +{mapping, "mirroring_sync_batch_size", "rabbit.mirroring_sync_batch_size", + [{datatype, bytesize}, {validators, ["size_less_than_2G"]}]}. + +%% Make clustering happen *automatically* at startup - only applied +%% to nodes that have just been reset or started for the first time. +%% See http://www.rabbitmq.com/clustering.html#auto-config for +%% further details. +%% +%% {cluster_nodes, {['rabbit@my.host.com'], disc}}, + +{mapping, "cluster_nodes.disc.$node", "rabbit.cluster_nodes", + [{datatype, atom}]}. + +{mapping, "cluster_nodes.ram.$node", "rabbit.cluster_nodes", + [{datatype, atom}]}. 
+ +{translation, "rabbit.cluster_nodes", +fun(Conf) -> + DiskNodes = [ V || {_, V} <- cuttlefish_variable:filter_by_prefix("cluster_nodes.disc", Conf)], + RamNodes = [ V || {_, V} <- cuttlefish_variable:filter_by_prefix("cluster_nodes.ram", Conf)], + + case {DiskNodes, RamNodes} of + {_, []} -> {DiskNodes, disc}; + {[], _} -> {RamNodes, ram} + end +end}. + + +%% Interval (in milliseconds) at which we send keepalive messages +%% to other cluster members. Note that this is not the same thing +%% as net_ticktime; missed keepalive messages will not cause nodes +%% to be considered down. +%% +%% {cluster_keepalive_interval, 10000}, + +{mapping, "cluster_keepalive_interval", "rabbit.cluster_keepalive_interval", + [{datatype, integer}]}. + + +{mapping, "queue_master_locator", "rabbit.queue_master_locator", + [{datatype, string}]}. + +{translation, "rabbit.queue_master_locator", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("queue_master_locator", Conf)) +end}. + +%% +%% Statistics Collection +%% ===================== +%% + +%% Set (internal) statistics collection granularity. +%% +%% {collect_statistics, none}, + +{mapping, "collect_statistics", "rabbit.collect_statistics", + [{datatype, {enum, [none, coarse, fine]}}]}. + +%% Statistics collection interval (in milliseconds). Increasing +%% this will reduce the load on management database. +%% +%% {collect_statistics_interval, 5000}, + +{mapping, "collect_statistics_interval", "rabbit.collect_statistics_interval", + [{datatype, integer}]}. + +%% +%% Misc/Advanced Options +%% ===================== +%% +%% NB: Change these only if you understand what you are doing! +%% + +%% Explicitly enable/disable hipe compilation. +%% +%% {hipe_compile, true}, + +{mapping, "hipe_compile", "rabbit.hipe_compile", + [{datatype, {enum, [true, false]}}]}. + +%% Timeout used when waiting for Mnesia tables in a cluster to +%% become available. 
+%% +%% {mnesia_table_loading_timeout, 30000}, + +{mapping, "mnesia_table_loading_timeout", "rabbit.mnesia_table_loading_timeout", + [{datatype, integer}]}. + +%% Size in bytes below which to embed messages in the queue index. See +%% http://www.rabbitmq.com/persistence-conf.html +%% +%% {queue_index_embed_msgs_below, 4096} + +{mapping, "queue_index_embed_msgs_below", "rabbit.queue_index_embed_msgs_below", + [{datatype, bytesize}]}. + +% ========================== +% Lager section +% ========================== + +{mapping, "log.dir", "lager.log_root", [ + {datatype, string}, + {validators, ["dir_writable"]}]}. + +{mapping, "log.console", "lager.handlers", [ + {datatype, {enum, [true, false]}} +]}. + +{mapping, "log.syslog", "lager.handlers", [ + {datatype, {enum, [true, false]}} +]}. +{mapping, "log.file", "lager.handlers", [ + {datatype, [{enum, [false]}, string]} +]}. + +{mapping, "log.file.level", "lager.handlers", [ + {datatype, {enum, [debug, info, warning, error]}} +]}. +{mapping, "log.$handler.level", "lager.handlers", [ + {datatype, {enum, [debug, info, warning, error]}} +]}. +{mapping, "log.file.rotation.date", "lager.handlers", [ + {datatype, string} +]}. +{mapping, "log.file.rotation.size", "lager.handlers", [ + {datatype, integer} +]}. +{mapping, "log.file.rotation.count", "lager.handlers", [ + {datatype, integer} +]}. + +{mapping, "log.syslog.identity", "lager.handlers", [ + {datatype, string} +]}. +{mapping, "log.syslog.facility", "lager.handlers", [ + {datatype, atom} +]}. 
+ +{translation, "lager.handlers", +fun(Conf) -> + ConsoleHandler = case cuttlefish:conf_get("log.console", Conf, false) of + true -> + ConsoleLevel = cuttlefish:conf_get("log.console.level", Conf, info), + [{lager_console_backend, ConsoleLevel}]; + false -> [] + end, + FileHandler = case cuttlefish:conf_get("log.file", Conf, false) of + false -> []; + File -> + FileLevel = cuttlefish:conf_get("log.file.level", Conf, info), + RotationDate = cuttlefish:conf_get("log.file.rotation.date", Conf, ""), + RotationSize = cuttlefish:conf_get("log.file.rotation.size", Conf, 0), + RotationCount = cuttlefish:conf_get("log.file.rotation.count", Conf, 10), + [{lager_file_backend, [{file, File}, + {level, FileLevel}, + {date, RotationDate}, + {size, RotationSize}, + {count, RotationCount}]}] + end, + SyslogHandler = case cuttlefish:conf_get("log.syslog", Conf, false) of + false -> []; + true -> + SyslogLevel = cuttlefish:conf_get("log.syslog.level", Conf, info), + Identity = cuttlefish:conf_get("log.syslog.identity", Conf), + Facility = cuttlefish:conf_get("log.syslog.facility", Conf), + [{lager_syslog_backend, [Identity, Facility, SyslogLevel]}] + end, + case ConsoleHandler ++ FileHandler ++ SyslogHandler of + [] -> undefined; + Other -> Other + end +end}. + + +% =============================== +% Validators +% =============================== + +{validator, "size_less_than_2G", "Byte size should be less than 2G and greater than 0", +fun(Size) when is_integer(Size) -> + Size > 0 andalso Size < 2147483648 +end}. + +{validator, "less_than_1", "Flooat is not beetween 0 and 1", +fun(Float) when is_float(Float) -> + Float > 0 andalso Float < 1 +end}. + +{validator, "port", "Invalid port number", +fun(Port) when is_integer(Port) -> + Port > 0 andalso Port < 65535 +end}. + +{validator, "byte", "Integer is not 0 + Int > 0 andalso Int < 255 +end}. 
+ +{validator, "dir_writable", "Cannot create file in dir", +fun(Dir) -> + TestFile = filename:join(Dir, "test_file"), + file:delete(TestFile), + Res = ok == file:write_file(TestFile, <<"test">>), + file:delete(TestFile), + Res +end}. + +{validator, "file_accessible", "file doesnt exist or unaccessible", +fun(File) -> + ReadFile = file:read_file_info(File), + element(1, ReadFile) == ok +end}. + +{validator, "is_ip", "string is a valid IP address", +fun(IpStr) -> + Res = inet:parse_address(IpStr), + element(1, Res) == ok +end}. diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_amqp1_0.schema b/test/config_schema_SUITE_data/schema/rabbitmq_amqp1_0.schema new file mode 100644 index 000000000000..e6cfb6826228 --- /dev/null +++ b/test/config_schema_SUITE_data/schema/rabbitmq_amqp1_0.schema @@ -0,0 +1,31 @@ +%% ---------------------------------------------------------------------------- +%% RabbitMQ AMQP 1.0 Support +%% +%% See https://github.com/rabbitmq/rabbitmq-amqp1.0/blob/stable/README.md +%% for details +%% ---------------------------------------------------------------------------- + +% {rabbitmq_amqp1_0,[ +%% Connections that are not authenticated with SASL will connect as this +%% account. See the README for more information. +%% +%% Please note that setting this will allow clients to connect without +%% authenticating! +%% +%% {default_user, "guest"}, +{mapping, "amqp1_0.default_user", "rabbitmq_amqp1_0.default_user", + [{datatype, [{enum, [none]}, string]}]}. +%% Enable protocol strict mode. See the README for more information. +%% +%% {protocol_strict_mode, false} +% ]}, +{mapping, "amqp1_0.protocol_strict_mode", "rabbitmq_amqp1_0.protocol_strict_mode", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "amqp1_0.default_vhost", "rabbitmq_amqp1_0.default_vhost", + [{datatype, string}]}. + +{translation , "rabbitmq_amqp1_0.default_vhost", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("amqp1_0.default_vhost", Conf)) +end}. 
\ No newline at end of file diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_amqp.schema b/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_amqp.schema new file mode 100644 index 000000000000..a30efb6c03b3 --- /dev/null +++ b/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_amqp.schema @@ -0,0 +1,27 @@ +{mapping, "rabbitmq_auth_backend_amqp.username", "rabbitmq_auth_backend_amqp.username", + [{datatype, string}]}. + +{translation, "rabbitmq_auth_backend_amqp.username", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("rabbitmq_auth_backend_amqp.username", Conf)) +end}. + +{mapping, "rabbitmq_auth_backend_amqp.vhost", "rabbitmq_auth_backend_amqp.vhost", + [{datatype, string}]}. + +{translation, "rabbitmq_auth_backend_amqp.vhost", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("rabbitmq_auth_backend_amqp.vhost", Conf)) +end}. + +{mapping, "rabbitmq_auth_backend_amqp.exchange", "rabbitmq_auth_backend_amqp.exchange", + [{datatype, string}]}. + +{translation, "rabbitmq_auth_backend_amqp.exchange", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("rabbitmq_auth_backend_amqp.exchange", Conf)) +end}. + + +{mapping, "rabbitmq_auth_backend_amqp.timeout", "rabbitmq_auth_backend_amqp.timeout", + [{datatype, [{enum, [infinity]}, integer]}]}. 
diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_http.schema b/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_http.schema new file mode 100644 index 000000000000..f10eb6710be5 --- /dev/null +++ b/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_http.schema @@ -0,0 +1,15 @@ + +%% ========================================================================== +%% ---------------------------------------------------------------------------- +%% RabbitMQ HTTP Authorization +%% +%% ---------------------------------------------------------------------------- + +{mapping, "rabbitmq_auth_backend_http.user_path", "rabbitmq_auth_backend_http.user_path", + [{datatype, string}, {validators, ["uri"]}]}. + +{mapping, "rabbitmq_auth_backend_http.vhost_path", "rabbitmq_auth_backend_http.vhost_path", + [{datatype, string}, {validators, ["uri"]}]}. + +{mapping, "rabbitmq_auth_backend_http.resource_path", "rabbitmq_auth_backend_http.resource_path", + [{datatype, string}, {validators, ["uri"]}]}. diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_ldap.schema b/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_ldap.schema new file mode 100644 index 000000000000..334fd014c1f4 --- /dev/null +++ b/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_ldap.schema @@ -0,0 +1,183 @@ +%% ---------------------------------------------------------------------------- +%% RabbitMQ LDAP Plugin +%% +%% See http://www.rabbitmq.com/ldap.html for details. +%% +%% ---------------------------------------------------------------------------- + +% {rabbitmq_auth_backend_ldap, +% [ +%% +%% Connecting to the LDAP server(s) +%% ================================ +%% + +%% Specify servers to bind to. You *must* set this in order for the plugin +%% to work properly. +%% +%% {servers, ["your-server-name-goes-here"]}, + +{mapping, "rabbitmq_auth_backend_ldap.servers", "rabbitmq_auth_backend_ldap.servers", + [{datatype, {enum, [none]}}]}. 
+ +{mapping, "rabbitmq_auth_backend_ldap.servers.$server", "rabbitmq_auth_backend_ldap.servers", + [{datatype, string}]}. + +{translation, "rabbitmq_auth_backend_ldap.servers", +fun(Conf) -> + case cuttlefish:conf_get("rabbitmq_auth_backend_ldap.servers", Conf, undefined) of + none -> []; + _ -> + Settings = cuttlefish_variable:filter_by_prefix("rabbitmq_auth_backend_ldap.servers", Conf), + [ V || {_, V} <- Settings ] + end +end}. + +%% Connect to the LDAP server using SSL +%% +%% {use_ssl, false}, + +{mapping, "rabbitmq_auth_backend_ldap.use_ssl", "rabbitmq_auth_backend_ldap.use_ssl", + [{datatype, {enum, [true, false]}}]}. + +%% Specify the LDAP port to connect to +%% +%% {port, 389}, + +{mapping, "rabbitmq_auth_backend_ldap.port", "rabbitmq_auth_backend_ldap.port", + [{datatype, integer}]}. + +%% LDAP connection timeout, in milliseconds or 'infinity' +%% +%% {timeout, infinity}, + +{mapping, "rabbitmq_auth_backend_ldap.timeout", "rabbitmq_auth_backend_ldap.timeout", + [{datatype, [integer, {atom, infinity}]}]}. + +%% Enable logging of LDAP queries. +%% One of +%% - false (no logging is performed) +%% - true (verbose logging of the logic used by the plugin) +%% - network (as true, but additionally logs LDAP network traffic) +%% +%% Defaults to false. +%% +%% {log, false}, + +{mapping, "rabbitmq_auth_backend_ldap.log", "rabbitmq_auth_backend_ldap.log", + [{datatype, {enum, [true, false, network]}}]}. + +%% +%% Authentication +%% ============== +%% + +%% Pattern to convert the username given through AMQP to a DN before +%% binding +%% +%% {user_dn_pattern, "cn=${username},ou=People,dc=example,dc=com"}, + +{mapping, "rabbitmq_auth_backend_ldap.user_dn_pattern", "rabbitmq_auth_backend_ldap.user_dn_pattern", + [{datatype, string}]}. + +%% Alternatively, you can convert a username to a Distinguished +%% Name via an LDAP lookup after binding. See the documentation for +%% full details. 
+ +%% When converting a username to a dn via a lookup, set these to +%% the name of the attribute that represents the user name, and the +%% base DN for the lookup query. +%% +%% {dn_lookup_attribute, "userPrincipalName"}, +%% {dn_lookup_base, "DC=gopivotal,DC=com"}, + +{mapping, "rabbitmq_auth_backend_ldap.dn_lookup_attribute", "rabbitmq_auth_backend_ldap.dn_lookup_attribute", + [{datatype, [{enum, [none]}, string]}]}. + +{mapping, "rabbitmq_auth_backend_ldap.dn_lookup_base", "rabbitmq_auth_backend_ldap.dn_lookup_base", + [{datatype, [{enum, [none]}, string]}]}. + +{mapping, "rabbitmq_auth_backend_ldap.dn_lookup_bind", "rabbitmq_auth_backend_ldap.dn_lookup_bind", + [{datatype, [{enum, [as_user]}]}]}. + +{mapping, "rabbitmq_auth_backend_ldap.dn_lookup_bind.user_dn", "rabbitmq_auth_backend_ldap.dn_lookup_bind", + [{datatype, [string]}]}. + +{mapping, "rabbitmq_auth_backend_ldap.dn_lookup_bind.password", "rabbitmq_auth_backend_ldap.dn_lookup_bind", + [{datatype, [string]}]}. + +{translation, "rabbitmq_auth_backend_ldap.dn_lookup_bind", +fun(Conf) -> + case cuttlefish:conf_get("rabbitmq_auth_backend_ldap.dn_lookup_bind", Conf, undefined) of + as_user -> as_user; + _ -> + User = cuttlefish:conf_get("rabbitmq_auth_backend_ldap.dn_lookup_bind.user_dn", Conf), + Pass = cuttlefish:conf_get("rabbitmq_auth_backend_ldap.dn_lookup_bind.password", Conf), + case {User, Pass} of + {undefined, _} -> as_user; + {_, undefined} -> as_user; + _ -> {User, Pass} + end + end +end}. + +%% Controls how to bind for authorisation queries and also to +%% retrieve the details of users logging in without presenting a +%% password (e.g., SASL EXTERNAL). +%% One of +%% - as_user (to bind as the authenticated user - requires a password) +%% - anon (to bind anonymously) +%% - {UserDN, Password} (to bind with a specified user name and password) +%% +%% Defaults to 'as_user'. 
+%% +%% {other_bind, as_user}, + +{mapping, "rabbitmq_auth_backend_ldap.other_bind", "rabbitmq_auth_backend_ldap.other_bind", + [{datatype, {enum, [as_user, anon]}}]}. + +{mapping, "rabbitmq_auth_backend_ldap.other_bind.user_dn", "rabbitmq_auth_backend_ldap.other_bind", + [{datatype, string}]}. + +{mapping, "rabbitmq_auth_backend_ldap.other_bind.password", "rabbitmq_auth_backend_ldap.other_bind", + [{datatype, string}]}. + +{translation, "rabbitmq_auth_backend_ldap.other_bind", +fun(Conf) -> + case cuttlefish:conf_get("rabbitmq_auth_backend_ldap.other_bind", Conf, undefined) of + as_user -> as_user; + anon -> anon; + _ -> + User = cuttlefish:conf_get("rabbitmq_auth_backend_ldap.other_bind.user_dn", Conf), + Pass = cuttlefish:conf_get("rabbitmq_auth_backend_ldap.other_bind.password", Conf), + case {User, Pass} of + {undefined, _} -> as_user; + {_, undefined} -> as_user; + _ -> {User, Pass} + end + end +end}. + +%% +%% Authorisation +%% ============= +%% + +%% The LDAP plugin can perform a variety of queries against your +%% LDAP server to determine questions of authorisation. See +%% http://www.rabbitmq.com/ldap.html#authorisation for more +%% information. + +%% Set the query to use when determining vhost access +%% +%% {vhost_access_query, {in_group, +%% "ou=${vhost}-users,ou=vhosts,dc=example,dc=com"}}, + +%% Set the query to use when determining resource (e.g., queue) access +%% +%% {resource_access_query, {constant, true}}, + +%% Set queries to determine which tags a user has +%% +%% {tag_queries, []} +% ]}, diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_clusterer.schema b/test/config_schema_SUITE_data/schema/rabbitmq_clusterer.schema new file mode 100644 index 000000000000..ba127f00c1a8 --- /dev/null +++ b/test/config_schema_SUITE_data/schema/rabbitmq_clusterer.schema @@ -0,0 +1,58 @@ +{mapping, "clusterer.config", "rabbitmq_clusterer.config", + [{datatype, string}, {validators, ["file_accessible"]}]}. 
+ +{translation, "rabbitmq_clusterer.config", +fun(Conf) -> + case cuttlefish:conf_get("clusterer.config", Conf, undefined) of + String when is_list(String) -> + case cuttlefish_variable:filter_by_prefix("clusterer", Conf) of + [{["clusterer", "config"], String}] -> String; + _ -> cuttlefish:invalid("Config for clusterer defined in "++ + String ++ " file. " ++ + "All other clusterer configurations should be removed") + end; + _ -> [] + end +end}. + +{mapping, "clusterer.version", "rabbitmq_clusterer.config.version", + [{datatype, integer}]}. + +{mapping, "clusterer.nodes.$node", "rabbitmq_clusterer.config.nodes", + [{datatype, atom}]}. + +{mapping, "clusterer.nodes.ram.$node", "rabbitmq_clusterer.config.nodes", + [{datatype, atom}]}. + +{mapping, "clusterer.nodes.disk.$node", "rabbitmq_clusterer.config.nodes", + [{datatype, atom}]}. + +{mapping, "clusterer.nodes.disc.$node", "rabbitmq_clusterer.config.nodes", + [{datatype, atom}]}. + +{translation, "rabbitmq_clusterer.config.nodes", +fun(Conf) -> + DiskNodes = cuttlefish_variable:filter_by_prefix("clusterer.nodes", Conf) + ++ cuttlefish_variable:filter_by_prefix("clusterer.nodes.disk", Conf) + ++ cuttlefish_variable:filter_by_prefix("clusterer.nodes.disc", Conf), + RamNodes = cuttlefish_variable:filter_by_prefix("clusterer.nodes.ram", Conf), + [{Node, disk} || {_, Node} <- DiskNodes] ++ [{Node, ram} || Node <- RamNodes] +end}. + +{mapping, "clusterer.gospel", "rabbitmq_clusterer.config.gospel", + [{datatype, {enum, [reset]}}]}. + +{mapping, "clusterer.gospel.node", "rabbitmq_clusterer.config.gospel", + [{datatype, atom}]}. + +{translation, "rabbitmq_clusterer.config.gospel", +fun(Conf) -> + case cuttlefish:conf_get("clusterer.gospel", Conf, undefined) of + reset -> reset; + _ -> + {node, cuttlefish:conf_get("clusterer.gospel.node", Conf)} + end +end}. 
+ + + diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_management.schema b/test/config_schema_SUITE_data/schema/rabbitmq_management.schema new file mode 100644 index 000000000000..7ac6d21b93f6 --- /dev/null +++ b/test/config_schema_SUITE_data/schema/rabbitmq_management.schema @@ -0,0 +1,203 @@ +%% ---------------------------------------------------------------------------- +%% RabbitMQ Management Plugin +%% +%% See http://www.rabbitmq.com/management.html for details +%% ---------------------------------------------------------------------------- + + % {rabbitmq_management, + % [%% Pre-Load schema definitions from the following JSON file. See +%% http://www.rabbitmq.com/management.html#load-definitions +%% +%% {load_definitions, "/path/to/schema.json"}, +{mapping, "management.load_definitions", "rabbitmq_management.load_definitions", + [{datatype, string}, + {validators, ["file_accessible"]}]}. + +%% Log all requests to the management HTTP API to a file. +%% +%% {http_log_dir, "/path/to/access.log"}, + +{mapping, "management.http_log_dir", "rabbitmq_management.http_log_dir", + [{datatype, string}]}. + + +%% Change the port on which the HTTP listener listens, +%% specifying an interface for the web server to bind to. +%% Also set the listener to use SSL and provide SSL options. +%% +%% {listener, [{port, 12345}, +%% {ip, "127.0.0.1"}, +%% {ssl, true}, +%% {ssl_opts, [{cacertfile, "/path/to/cacert.pem"}, +%% {certfile, "/path/to/cert.pem"}, +%% {keyfile, "/path/to/key.pem"}]}]}, + +{mapping, "management.listener.port", "rabbitmq_management.listener.port", + [{datatype, integer}]}. + +{mapping, "management.listener.ip", "rabbitmq_management.listener.ip", + [{datatype, string}, + {validators, ["is_ip"]}]}. + +{mapping, "management.listener.ssl", "rabbitmq_management.listener.ssl", + [{datatype, {enum, [true, false]}}]}. 
+ + +%% SSL options section ======================================================== + +{mapping, "management.listener.ssl_opts", "rabbitmq_management.listener.ssl_opts", [ + {datatype, {enum, [none]}} +]}. + +{translation, "rabbitmq_management.listener.ssl_opts", +fun(Conf) -> + case cuttlefish:conf_get("management.listener.ssl_opts", Conf, undefined) of + none -> []; + _ -> cuttlefish:invalid("Invalid management.listener.ssl_opts") + end +end}. + +{mapping, "management.listener.ssl_opts.verify", "rabbitmq_management.listener.ssl_opts.verify", [ + {datatype, {enum, [verify_peer, verify_none]}}]}. + +{mapping, "management.listener.ssl_opts.fail_if_no_peer_cert", "rabbitmq_management.listener.ssl_opts.fail_if_no_peer_cert", [ + {datatype, {enum, [true, false]}}]}. + +{mapping, "management.listener.ssl_opts.cacertfile", "rabbitmq_management.listener.ssl_opts.cacertfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. + +{mapping, "management.listener.ssl_opts.certfile", "rabbitmq_management.listener.ssl_opts.certfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. + +{mapping, "management.listener.ssl_opts.cacerts.$name", "rabbitmq_management.listener.ssl_opts.cacerts", + [{datatype, string}]}. + +{translation, "rabbitmq_management.listener.ssl_opts.cacerts", +fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("management.listener.ssl_opts.cacerts", Conf), + [ list_to_binary(V) || {_, V} <- Settings ] +end}. + +{mapping, "management.listener.ssl_opts.cert", "rabbitmq_management.listener.ssl_opts.cert", + [{datatype, string}]}. + +{translation, "rabbitmq_management.listener.ssl_opts.cert", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("management.listener.ssl_opts.cert", Conf)) +end}. + +{mapping, "management.listener.ssl_opts.client_renegotiation", "rabbitmq_management.listener.ssl_opts.client_renegotiation", + [{datatype, {enum, [true, false]}}]}. 
+ +{mapping, "management.listener.ssl_opts.crl_check", "rabbitmq_management.listener.ssl_opts.crl_check", + [{datatype, [{enum, [true, false, peer, best_effort]}]}]}. + +{mapping, "management.listener.ssl_opts.depth", "rabbitmq_management.listener.ssl_opts.depth", + [{datatype, integer}, {validators, ["byte"]}]}. + +{mapping, "management.listener.ssl_opts.dh", "rabbitmq_management.listener.ssl_opts.dh", + [{datatype, string}]}. + +{translation, "rabbitmq_management.listener.ssl_opts.dh", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("management.listener.ssl_opts.dh", Conf)) +end}. + +{mapping, "management.listener.ssl_opts.dhfile", "rabbitmq_management.listener.ssl_opts.dhfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. + +{mapping, "management.listener.ssl_opts.honor_cipher_order", "rabbitmq_management.listener.ssl_opts.honor_cipher_order", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "management.listener.ssl_opts.key.RSAPrivateKey", "rabbitmq_management.listener.ssl_opts.key", + [{datatype, string}]}. + +{mapping, "management.listener.ssl_opts.key.DSAPrivateKey", "rabbitmq_management.listener.ssl_opts.key", + [{datatype, string}]}. + +{mapping, "management.listener.ssl_opts.key.PrivateKeyInfo", "rabbitmq_management.listener.ssl_opts.key", + [{datatype, string}]}. + +{translation, "rabbitmq_management.listener.ssl_opts.key", +fun(Conf) -> + case cuttlefish_variable:filter_by_prefix("management.listener.ssl_opts.key", Conf) of + [{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)}; + _ -> undefined + end +end}. + +{mapping, "management.listener.ssl_opts.keyfile", "rabbitmq_management.listener.ssl_opts.keyfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. + +{mapping, "management.listener.ssl_opts.log_alert", "rabbitmq_management.listener.ssl_opts.log_alert", + [{datatype, {enum, [true, false]}}]}. 
+ +{mapping, "management.listener.ssl_opts.password", "rabbitmq_management.listener.ssl_opts.password", + [{datatype, string}]}. + +{mapping, "management.listener.ssl_opts.psk_identity", "rabbitmq_management.listener.ssl_opts.psk_identity", + [{datatype, string}]}. + +{mapping, "management.listener.ssl_opts.reuse_sessions", "rabbitmq_management.listener.ssl_opts.reuse_sessions", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "management.listener.ssl_opts.secure_renegotiate", "rabbitmq_management.listener.ssl_opts.secure_renegotiate", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "management.listener.ssl_opts.versions.$version", "rabbitmq_management.listener.ssl_opts.versions", + [{datatype, atom}]}. + +{translation, "rabbitmq_management.listener.ssl_opts.versions", +fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("management.listener.ssl_opts.versions", Conf), + [ V || {_, V} <- Settings ] +end}. + +%% =========================================================================== + + +%% One of 'basic', 'detailed' or 'none'. See +%% http://www.rabbitmq.com/management.html#fine-stats for more details. +%% {rates_mode, basic}, +{mapping, "management.rates_mode", "rabbitmq_management.rates_mode", + [{datatype, {enum, [basic, detailed, none]}}]}. + +%% Configure how long aggregated data (such as message rates and queue +%% lengths) is retained. Please read the plugin's documentation in +%% http://www.rabbitmq.com/management.html#configuration for more +%% details. +%% +%% {sample_retention_policies, +%% [{global, [{60, 5}, {3600, 60}, {86400, 1200}]}, +%% {basic, [{60, 5}, {3600, 60}]}, +%% {detailed, [{10, 5}]}]} +% ]}, + +{mapping, "management.sample_retention_policies.$section.$interval", + "rabbitmq_management.sample_retention_policies", + [{datatype, integer}]}. 
+ +{translation, "rabbitmq_management.sample_retention_policies", +fun(Conf) -> + Global = cuttlefish_variable:filter_by_prefix("management.sample_retention_policies.global", Conf), + Basic = cuttlefish_variable:filter_by_prefix("management.sample_retention_policies.basic", Conf), + Detailed = cuttlefish_variable:filter_by_prefix("management.sample_retention_policies.detailed", Conf), + TranslateKey = fun("minute") -> 60; + ("hour") -> 3600; + ("day") -> 86400; + (Other) -> list_to_integer(Other) + end, + TranslatePolicy = fun(Section) -> + [ {TranslateKey(Key), Val} || {[_,_,_,Key], Val} <- Section ] + end, + [{global, TranslatePolicy(Global)}, + {basic, TranslatePolicy(Basic)}, + {detailed, TranslatePolicy(Detailed)}] +end}. + + +{validator, "is_dir", "is not directory", +fun(File) -> + ReadFile = file:list_dir(File), + element(1, ReadFile) == ok +end}. diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_metronome.schema b/test/config_schema_SUITE_data/schema/rabbitmq_metronome.schema new file mode 100644 index 000000000000..53cf8f003eba --- /dev/null +++ b/test/config_schema_SUITE_data/schema/rabbitmq_metronome.schema @@ -0,0 +1,9 @@ + +{mapping, "metronome.exchange", "rabbitmq_metronome.exchange", + [{datatype, string}]}. + +{translation, "rabbitmq_metronome.exchange", +fun(Conf) -> + Exchange = cuttlefish:conf_get("metronome.exchange", Conf), + list_to_binary(Exchange) +end}. 
\ No newline at end of file diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_mqtt.schema b/test/config_schema_SUITE_data/schema/rabbitmq_mqtt.schema new file mode 100644 index 000000000000..1daab5423d58 --- /dev/null +++ b/test/config_schema_SUITE_data/schema/rabbitmq_mqtt.schema @@ -0,0 +1,235 @@ +%% ---------------------------------------------------------------------------- +%% RabbitMQ MQTT Adapter +%% +%% See https://github.com/rabbitmq/rabbitmq-mqtt/blob/stable/README.md +%% for details +%% ---------------------------------------------------------------------------- + +% {rabbitmq_mqtt, +% [%% Set the default user name and password. Will be used as the default login +%% if a connecting client provides no other login details. +%% +%% Please note that setting this will allow clients to connect without +%% authenticating! +%% +%% {default_user, <<"guest">>}, +%% {default_pass, <<"guest">>}, + +{mapping, "mqtt.default_user", "rabbitmq_mqtt.default_user", [ + {datatype, string} +]}. + +{mapping, "mqtt.default_pass", "rabbitmq_mqtt.default_pass", [ + {datatype, string} +]}. + +{translation, "rabbitmq_mqtt.default_user", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("mqtt.default_user", Conf)) +end}. + +{translation, "rabbitmq_mqtt.default_pass", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("mqtt.default_pass", Conf)) +end}. + +%% Enable anonymous access. If this is set to false, clients MUST provide +%% login information in order to connect. See the default_user/default_pass +%% configuration elements for managing logins without authentication. +%% +%% {allow_anonymous, true}, + +{mapping, "mqtt.allow_anonymous", "rabbitmq_mqtt.allow_anonymous", + [{datatype, {enum, [true, false]}}]}. + +%% If you have multiple chosts, specify the one to which the +%% adapter connects. +%% +%% {vhost, <<"/">>}, + +{mapping, "mqtt.vhost", "rabbitmq_mqtt.vhost", [{datatype, string}]}. 
+ +{translation, "rabbitmq_mqtt.vhost", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("mqtt.vhost", Conf)) +end}. + +%% Specify the exchange to which messages from MQTT clients are published. +%% +%% {exchange, <<"amq.topic">>}, + +{mapping, "mqtt.exchange", "rabbitmq_mqtt.exchange", [{datatype, string}]}. + +{translation, "rabbitmq_mqtt.exchange", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("mqtt.exchange", Conf)) +end}. + +%% Specify TTL (time to live) to control the lifetime of non-clean sessions. +%% +%% {subscription_ttl, 1800000}, +{mapping, "mqtt.subscription_ttl", "rabbitmq_mqtt.subscription_ttl", [ + {datatype, [{enum, [undefined, infinity]}, integer]} +]}. + +{translation, "rabbitmq_mqtt.subscription_ttl", +fun(Conf) -> + case cuttlefish:conf_get("mqtt.subscription_ttl", Conf, undefined) of + undefined -> undefined; + infinity -> undefined; + Ms -> Ms + end +end}. + +%% Set the prefetch count (governing the maximum number of unacknowledged +%% messages that will be delivered). +%% +%% {prefetch, 10}, +{mapping, "mqtt.prefetch", "rabbitmq_mqtt.prefetch", + [{datatype, integer}]}. + + +{mapping, "mqtt.retained_message_store", "rabbitmq_mqtt.retained_message_store", + [{datatype, atom}]}. + +{mapping, "mqtt.retained_message_store_dets_sync_interval", "rabbitmq_mqtt.retained_message_store_dets_sync_interval", + [{datatype, integer}]}. + + + +%% TCP/SSL Configuration (as per the broker configuration). +%% +%% {tcp_listeners, [1883]}, +%% {ssl_listeners, []}, + +{mapping, "mqtt.listeners.tcp", "rabbitmq_mqtt.tcp_listeners",[ + {datatype, {enum, [none]}} +]}. + +{mapping, "mqtt.listeners.tcp.$name", "rabbitmq_mqtt.tcp_listeners",[ + {datatype, [integer, ip]} +]}. + +{translation, "rabbitmq_mqtt.tcp_listeners", +fun(Conf) -> + case cuttlefish:conf_get("mqtt.listeners.tcp", Conf, undefined) of + none -> []; + _ -> + Settings = cuttlefish_variable:filter_by_prefix("mqtt.listeners.tcp", Conf), + [ V || {_, V} <- Settings ] + end +end}. 
+ +{mapping, "mqtt.listeners.ssl", "rabbitmq_mqtt.ssl_listeners",[ + {datatype, {enum, [none]}} +]}. + +{mapping, "mqtt.listeners.ssl.$name", "rabbitmq_mqtt.ssl_listeners",[ + {datatype, [integer, ip]} +]}. + +{translation, "rabbitmq_mqtt.ssl_listeners", +fun(Conf) -> + case cuttlefish:conf_get("mqtt.listeners.ssl", Conf, undefined) of + none -> []; + _ -> + Settings = cuttlefish_variable:filter_by_prefix("mqtt.listeners.ssl", Conf), + [ V || {_, V} <- Settings ] + end +end}. + +%% Number of Erlang processes that will accept connections for the TCP +%% and SSL listeners. +%% +%% {num_tcp_acceptors, 10}, +%% {num_ssl_acceptors, 1}, + +{mapping, "mqtt.num_acceptors.ssl", "rabbitmq_mqtt.num_ssl_acceptors", [ + {datatype, integer} +]}. + +{mapping, "mqtt.num_acceptors.tcp", "rabbitmq_mqtt.num_tcp_acceptors", [ + {datatype, integer} +]}. + +{mapping, "mqtt.ssl_cert_login", "rabbitmq_mqtt.ssl_cert_login", [ + {datatype, {enum, [true, false]}}]}. + + +%% TCP/Socket options (as per the broker configuration). +%% +%% {tcp_listen_options, [{backlog, 128}, +%% {nodelay, true}]} +% ]}, + +%% TCP listener section ====================================================== + +{mapping, "mqtt.tcp_listen_options", "rabbitmq_mqtt.rabbit.tcp_listen_options", [ + {datatype, {enum, [none]}}]}. + +{translation, "rabbitmq_mqtt.rabbit.tcp_listen_options", +fun(Conf) -> + case cuttlefish:conf_get("mqtt.tcp_listen_options") of + none -> []; + _ -> cuttlefish:invalid("Invalid mqtt.tcp_listen_options") + end +end}. + +{mapping, "mqtt.tcp_listen_options.backlog", "rabbitmq_mqtt.tcp_listen_options.backlog", [ + {datatype, integer} +]}. + +{mapping, "mqtt.tcp_listen_options.nodelay", "rabbitmq_mqtt.tcp_listen_options.nodelay", [ + {datatype, {enum, [true, false]}} +]}. + +{mapping, "mqtt.tcp_listen_options.buffer", "rabbitmq_mqtt.tcp_listen_options.buffer", + [{datatype, integer}]}. 
+ +{mapping, "mqtt.tcp_listen_options.delay_send", "rabbitmq_mqtt.tcp_listen_options.delay_send", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "mqtt.tcp_listen_options.dontroute", "rabbitmq_mqtt.tcp_listen_options.dontroute", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "mqtt.tcp_listen_options.exit_on_close", "rabbitmq_mqtt.tcp_listen_options.exit_on_close", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "mqtt.tcp_listen_options.fd", "rabbitmq_mqtt.tcp_listen_options.fd", + [{datatype, integer}]}. + +{mapping, "mqtt.tcp_listen_options.high_msgq_watermark", "rabbitmq_mqtt.tcp_listen_options.high_msgq_watermark", + [{datatype, integer}]}. + +{mapping, "mqtt.tcp_listen_options.high_watermark", "rabbitmq_mqtt.tcp_listen_options.high_watermark", + [{datatype, integer}]}. + +{mapping, "mqtt.tcp_listen_options.keepalive", "rabbitmq_mqtt.tcp_listen_options.keepalive", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "mqtt.tcp_listen_options.low_msgq_watermark", "rabbitmq_mqtt.tcp_listen_options.low_msgq_watermark", + [{datatype, integer}]}. + +{mapping, "mqtt.tcp_listen_options.low_watermark", "rabbitmq_mqtt.tcp_listen_options.low_watermark", + [{datatype, integer}]}. + +{mapping, "mqtt.tcp_listen_options.port", "rabbitmq_mqtt.tcp_listen_options.port", + [{datatype, integer}, {validators, ["port"]}]}. + +{mapping, "mqtt.tcp_listen_options.priority", "rabbitmq_mqtt.tcp_listen_options.priority", + [{datatype, integer}]}. + +{mapping, "mqtt.tcp_listen_options.recbuf", "rabbitmq_mqtt.tcp_listen_options.recbuf", + [{datatype, integer}]}. + +{mapping, "mqtt.tcp_listen_options.send_timeout", "rabbitmq_mqtt.tcp_listen_options.send_timeout", + [{datatype, integer}]}. + +{mapping, "mqtt.tcp_listen_options.send_timeout_close", "rabbitmq_mqtt.tcp_listen_options.send_timeout_close", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "mqtt.tcp_listen_options.sndbuf", "rabbitmq_mqtt.tcp_listen_options.sndbuf", + [{datatype, integer}]}. 
+ +{mapping, "mqtt.tcp_listen_options.tos", "rabbitmq_mqtt.tcp_listen_options.tos", + [{datatype, integer}]}. diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_stomp.schema b/test/config_schema_SUITE_data/schema/rabbitmq_stomp.schema new file mode 100644 index 000000000000..b7619f0b2867 --- /dev/null +++ b/test/config_schema_SUITE_data/schema/rabbitmq_stomp.schema @@ -0,0 +1,110 @@ +%% ========================================================================== +%% ---------------------------------------------------------------------------- +%% RabbitMQ Stomp Adapter +%% +%% See http://www.rabbitmq.com/stomp.html for details +%% ---------------------------------------------------------------------------- + +% {rabbitmq_stomp, +% [%% Network Configuration - the format is generally the same as for the broker + +%% Listen only on localhost (ipv4 & ipv6) on a specific port. +%% {tcp_listeners, [{"127.0.0.1", 61613}, +%% {"::1", 61613}]}, + +{mapping, "stomp.listeners.tcp", "rabbitmq_stomp.tcp_listeners",[ + {datatype, {enum, [none]}} +]}. + +{mapping, "stomp.listeners.tcp.$name", "rabbitmq_stomp.tcp_listeners",[ + {datatype, [integer, ip]} +]}. + +{translation, "rabbitmq_stomp.tcp_listeners", +fun(Conf) -> + case cuttlefish:conf_get("stomp.listeners.tcp", Conf, undefined) of + none -> []; + _ -> + Settings = cuttlefish_variable:filter_by_prefix("stomp.listeners.tcp", Conf), + [ V || {_, V} <- Settings ] + end +end}. + +{mapping, "stomp.listeners.ssl", "rabbitmq_stomp.ssl_listeners",[ + {datatype, {enum, [none]}} +]}. + +{mapping, "stomp.listeners.ssl.$name", "rabbitmq_stomp.ssl_listeners",[ + {datatype, [integer, ip]} +]}. + +{translation, "rabbitmq_stomp.ssl_listeners", +fun(Conf) -> + case cuttlefish:conf_get("stomp.listeners.ssl", Conf, undefined) of + none -> []; + _ -> + Settings = cuttlefish_variable:filter_by_prefix("stomp.listeners.ssl", Conf), + [ V || {_, V} <- Settings ] + end +end}. 
+ +%% Number of Erlang processes that will accept connections for the TCP +%% and SSL listeners. +%% +%% {num_tcp_acceptors, 10}, +%% {num_ssl_acceptors, 1}, + +{mapping, "stomp.num_acceptors.ssl", "rabbitmq_stomp.num_ssl_acceptors", [ + {datatype, integer} +]}. + +{mapping, "stomp.num_acceptors.tcp", "rabbitmq_stomp.num_tcp_acceptors", [ + {datatype, integer} +]}. + +%% Additional SSL options + +%% Extract a name from the client's certificate when using SSL. +%% +%% {ssl_cert_login, true}, + +{mapping, "stomp.ssl_cert_login", "rabbitmq_stomp.ssl_cert_login", + [{datatype, {enum, [true, false]}}]}. + +%% Set a default user name and password. This is used as the default login +%% whenever a CONNECT frame omits the login and passcode headers. +%% +%% Please note that setting this will allow clients to connect without +%% authenticating! +%% +%% {default_user, [{login, "guest"}, +%% {passcode, "guest"}]}, + +{mapping, "stomp.default_vhost", "rabbitmq_stomp.default_vhost", [ + {datatype, string} +]}. + +{translation, "rabbitmq_stomp.default_vhost", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("stomp.default_vhost", Conf, "/")) +end}. + +{mapping, "stomp.default_user", "rabbitmq_stomp.default_user.login", [ + {datatype, string} +]}. + +{mapping, "stomp.default_pass", "rabbitmq_stomp.default_user.passcode", [ + {datatype, string} +]}. + +%% If a default user is configured, or you have configured use SSL client +%% certificate based authentication, you can choose to allow clients to +%% omit the CONNECT frame entirely. If set to true, the client is +%% automatically connected as the default user or user supplied in the +%% SSL certificate whenever the first frame sent on a session is not a +%% CONNECT frame. +%% +%% {implicit_connect, true} +% ]}, +{mapping, "stomp.implicit_connect", "rabbitmq_stomp.implicit_connect", + [{datatype, {enum, [true, false]}}]}. 
diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_web_mqtt.schema b/test/config_schema_SUITE_data/schema/rabbitmq_web_mqtt.schema new file mode 100644 index 000000000000..acdab62c3222 --- /dev/null +++ b/test/config_schema_SUITE_data/schema/rabbitmq_web_mqtt.schema @@ -0,0 +1,44 @@ +{mapping, "web_mqtt.num_acceptors.tcp", "rabbitmq_web_mqtt.num_tcp_acceptors", + [{datatype, integer}]}. +{mapping, "web_mqtt.num_acceptors.ssl", "rabbitmq_web_mqtt.num_ssl_acceptors", + [{datatype, integer}]}. + +{mapping, "web_mqtt.tcp.port", "rabbitmq_web_mqtt.tcp_config.port", + [{datatype, integer}]}. +{mapping, "web_mqtt.tcp.backlog", "rabbitmq_web_mqtt.tcp_config.backlog", + [{datatype, integer}]}. +{mapping, "web_mqtt.tcp.ip", "rabbitmq_web_mqtt.tcp_config.ip", + [{datatype, string}, {validators, ["is_ip"]}]}. + + +{mapping, "web_mqtt.ssl.port", "rabbitmq_web_mqtt.ssl_config.port", + [{datatype, integer}]}. +{mapping, "web_mqtt.ssl.backlog", "rabbitmq_web_mqtt.ssl_config.backlog", + [{datatype, integer}]}. +{mapping, "web_mqtt.ssl.ip", "rabbitmq_web_mqtt.ssl_config.ip", + [{datatype, string}, {validators, ["is_ip"]}]}. +{mapping, "web_mqtt.ssl.certfile", "rabbitmq_web_mqtt.ssl_config.certfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. +{mapping, "web_mqtt.ssl.keyfile", "rabbitmq_web_mqtt.ssl_config.keyfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. +{mapping, "web_mqtt.ssl.cacertfile", "rabbitmq_web_mqtt.ssl_config.cacertfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. +{mapping, "web_mqtt.ssl.password", "rabbitmq_web_mqtt.ssl_config.password", + [{datatype, string}]}. + + +{mapping, "web_mqtt.cowboy_opts.max_empty_lines", "rabbitmq_web_mqtt.cowboy_opts.max_empty_lines", + [{datatype, integer}]}. +{mapping, "web_mqtt.cowboy_opts.max_header_name_length", "rabbitmq_web_mqtt.cowboy_opts.max_header_name_length", + [{datatype, integer}]}. 
+{mapping, "web_mqtt.cowboy_opts.max_header_value_length", "rabbitmq_web_mqtt.cowboy_opts.max_header_value_length", + [{datatype, integer}]}. +{mapping, "web_mqtt.cowboy_opts.max_headers", "rabbitmq_web_mqtt.cowboy_opts.max_headers", + [{datatype, integer}]}. +{mapping, "web_mqtt.cowboy_opts.max_keepalive", "rabbitmq_web_mqtt.cowboy_opts.max_keepalive", + [{datatype, integer}]}. +{mapping, "web_mqtt.cowboy_opts.max_request_line_length", "rabbitmq_web_mqtt.cowboy_opts.max_request_line_length", + [{datatype, integer}]}. +{mapping, "web_mqtt.cowboy_opts.timeout", "rabbitmq_web_mqtt.cowboy_opts.timeout", + [{datatype, integer}]}. + diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_web_stomp.schema b/test/config_schema_SUITE_data/schema/rabbitmq_web_stomp.schema new file mode 100644 index 000000000000..389da07d14c8 --- /dev/null +++ b/test/config_schema_SUITE_data/schema/rabbitmq_web_stomp.schema @@ -0,0 +1,64 @@ +{mapping, "web_stomp.port", "rabbitmq_web_stomp.port", + [{datatype, integer}]}. + +{mapping, "web_stomp.ws_frame", "rabbitmq_web_stomp.ws_frame", + [{datatype, {enum, [binary, text]}}]}. + +{mapping, "web_stomp.num_acceptors.tcp", "rabbitmq_web_stomp.num_tcp_acceptors", + [{datatype, integer}]}. + +{mapping, "web_stomp.num_acceptors.ssl", "rabbitmq_web_stomp.num_ssl_acceptors", + [{datatype, integer}]}. + +{mapping, "web_stomp.tcp.port", "rabbitmq_web_stomp.tcp_config.port", + [{datatype, integer}]}. +{mapping, "web_stomp.tcp.backlog", "rabbitmq_web_stomp.tcp_config.backlog", + [{datatype, integer}]}. +{mapping, "web_stomp.tcp.ip", "rabbitmq_web_stomp.tcp_config.ip", + [{datatype, string}, {validators, ["is_ip"]}]}. + + +{mapping, "web_stomp.ssl.port", "rabbitmq_web_stomp.ssl_config.port", + [{datatype, integer}]}. +{mapping, "web_stomp.ssl.backlog", "rabbitmq_web_stomp.ssl_config.backlog", + [{datatype, integer}]}. +{mapping, "web_stomp.ssl.ip", "rabbitmq_web_stomp.ssl_config.ip", + [{datatype, string}, {validators, ["is_ip"]}]}. 
+{mapping, "web_stomp.ssl.certfile", "rabbitmq_web_stomp.ssl_config.certfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. +{mapping, "web_stomp.ssl.keyfile", "rabbitmq_web_stomp.ssl_config.keyfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. +{mapping, "web_stomp.ssl.cacertfile", "rabbitmq_web_stomp.ssl_config.cacertfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. +{mapping, "web_stomp.ssl.password", "rabbitmq_web_stomp.ssl_config.password", + [{datatype, string}]}. + + +{mapping, "web_stomp.cowboy_opts.max_empty_lines", "rabbitmq_web_stomp.cowboy_opts.max_empty_lines", + [{datatype, integer}]}. +{mapping, "web_stomp.cowboy_opts.max_header_name_length", "rabbitmq_web_stomp.cowboy_opts.max_header_name_length", + [{datatype, integer}]}. +{mapping, "web_stomp.cowboy_opts.max_header_value_length", "rabbitmq_web_stomp.cowboy_opts.max_header_value_length", + [{datatype, integer}]}. +{mapping, "web_stomp.cowboy_opts.max_headers", "rabbitmq_web_stomp.cowboy_opts.max_headers", + [{datatype, integer}]}. +{mapping, "web_stomp.cowboy_opts.max_keepalive", "rabbitmq_web_stomp.cowboy_opts.max_keepalive", + [{datatype, integer}]}. +{mapping, "web_stomp.cowboy_opts.max_request_line_length", "rabbitmq_web_stomp.cowboy_opts.max_request_line_length", + [{datatype, integer}]}. +{mapping, "web_stomp.cowboy_opts.timeout", "rabbitmq_web_stomp.cowboy_opts.timeout", + [{datatype, integer}]}. + + +{mapping, "web_stomp.sockjs_opts.url", "rabbitmq_web_stomp.sockjs_opts.sockjs_url", + [{datatype, string}]}. +{mapping, "web_stomp.sockjs_opts.websocket", "rabbitmq_web_stomp.sockjs_opts.websocket", + [{datatype, {enum, [true, false]}}]}. +{mapping, "web_stomp.sockjs_opts.cookie_needed", "rabbitmq_web_stomp.sockjs_opts.cookie_needed", + [{datatype, {enum, [true, false]}}]}. +{mapping, "web_stomp.sockjs_opts.heartbeat_delay", "rabbitmq_web_stomp.sockjs_opts.heartbeat_delay", + [{datatype, integer}]}. 
+{mapping, "web_stomp.sockjs_opts.disconnect_delay", "rabbitmq_web_stomp.sockjs_opts.disconnect_delay", + [{datatype, integer}]}. +{mapping, "web_stomp.sockjs_opts.response_limit", "rabbitmq_web_stomp.sockjs_opts.response_limit", + [{datatype, integer}]}. From df7a84271155ecc3765c066cf35e3dccc786e31c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Tue, 31 May 2016 11:38:20 +0200 Subject: [PATCH 130/174] Backport the use of rabbitmq-build.mk from master --- Makefile | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index c3c0df5c77c6..ccf1921b008e 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,8 @@ EXTRA_SOURCES += $(USAGES_ERL) .DEFAULT_GOAL = all $(PROJECT).d:: $(EXTRA_SOURCES) -DEP_PLUGINS = rabbit_common/mk/rabbitmq-run.mk \ +DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ + rabbit_common/mk/rabbitmq-run.mk \ rabbit_common/mk/rabbitmq-dist.mk \ rabbit_common/mk/rabbitmq-tools.mk @@ -101,19 +102,11 @@ USE_PROPER_QC := $(shell $(ERL) -eval 'io:format({module, proper} =:= code:ensur RMQ_ERLC_OPTS += $(if $(filter true,$(USE_PROPER_QC)),-Duse_proper_qc) endif -ERLC_OPTS += $(RMQ_ERLC_OPTS) - clean:: clean-extra-sources clean-extra-sources: $(gen_verbose) rm -f $(EXTRA_SOURCES) -# -------------------------------------------------------------------- -# Tests. -# -------------------------------------------------------------------- - -TEST_ERLC_OPTS += $(RMQ_ERLC_OPTS) - # -------------------------------------------------------------------- # Documentation. # -------------------------------------------------------------------- From 0e5c975720bcb70c2de50f88d1d87a4099556855 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Wed, 1 Jun 2016 01:52:05 +0300 Subject: [PATCH 131/174] Include rabbitmq_jms_topic_exchange into RabbitMQ distribution Now that our JMS client is open sourced we can do it. 
--- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index ccf1921b008e..1cdc1f797c7b 100644 --- a/Makefile +++ b/Makefile @@ -44,6 +44,7 @@ DISTRIBUTED_DEPS := rabbitmq_amqp1_0 \ rabbitmq_event_exchange \ rabbitmq_federation \ rabbitmq_federation_management \ + rabbitmq_jms_topic_exchange \ rabbitmq_management \ rabbitmq_management_agent \ rabbitmq_management_visualiser \ From 249b7fb84ce52273bfa58ceb9068990151b0dd44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Thu, 2 Jun 2016 11:45:31 +0200 Subject: [PATCH 132/174] unit_inbroker_SUITE: Give one hour for the backing queue tests In our CI, some tests are hitting the default commont_test timetrap of 30 minutes. --- test/unit_inbroker_SUITE.erl | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/test/unit_inbroker_SUITE.erl b/test/unit_inbroker_SUITE.erl index dfde1fba220f..e20f63ba8bb2 100644 --- a/test/unit_inbroker_SUITE.erl +++ b/test/unit_inbroker_SUITE.erl @@ -141,6 +141,14 @@ groups() -> ]} ]. +group(backing_queue_tests) -> + [ + %% Several tests based on lazy queues may take more than 30 minutes. + {timetrap, {hours, 1}} + ]; +group(_) -> + []. + %% ------------------------------------------------------------------- %% Testsuite setup/teardown. %% ------------------------------------------------------------------- From f2a8427b7d44f6de633cfe6abaccee9dc06710a1 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Thu, 2 Jun 2016 14:56:45 +0300 Subject: [PATCH 133/174] Squash a warning --- src/rabbit_exchange.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 676a6561f249..5d646e0c9039 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -355,7 +355,7 @@ i(Item, #exchange{type = Type} = X) -> info(X = #exchange{type = Type}) -> infos(?INFO_KEYS, X) ++ (type_to_module(Type)):info(X). 
-info(X = #exchange{type = Type}, Items) -> +info(X = #exchange{type = _Type}, Items) -> infos(Items, X). info_all(VHostPath) -> map(VHostPath, fun (X) -> info(X) end). From f1f7dac2cc37e60c3f6a43b66a96104ec0c5d909 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Fri, 3 Jun 2016 15:10:40 +0300 Subject: [PATCH 134/174] Fixes #820. --- src/rabbit.erl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 2cc353d7b8d5..a1a70e5ee973 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -489,6 +489,7 @@ await_startup(HaveSeenRabbitBoot) -> status() -> S1 = [{pid, list_to_integer(os:getpid())}, + %% The timeout value used is twice that of gen_server:call/2. {running_applications, rabbit_misc:which_applications()}, {os, os:type()}, {erlang_version, erlang:system_info(system_version)}, @@ -545,8 +546,9 @@ is_running() -> is_running(node()). is_running(Node) -> rabbit_nodes:is_process_running(Node, rabbit). environment() -> + %% The timeout value is twice that of gen_server:call/2. [{A, environment(A)} || - {A, _, _} <- lists:keysort(1, application:which_applications())]. + {A, _, _} <- lists:keysort(1, application:which_applications(10000))]. environment(App) -> Ignore = [default_pass, included_applications], From 2bba5edb6f78e436a1c1c3d3b9655d9fb821ad6c Mon Sep 17 00:00:00 2001 From: Alexey Lebedeff Date: Thu, 2 Jun 2016 19:21:59 +0300 Subject: [PATCH 135/174] Check cluster_status liveness during OCF checks We've observed some `autoheal` bug that made `cluster_status` became stuck forever. --- scripts/rabbitmq-server-ha.ocf | 43 +++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/scripts/rabbitmq-server-ha.ocf b/scripts/rabbitmq-server-ha.ocf index 8a2075d9f0c6..6c1e52251e7a 100755 --- a/scripts/rabbitmq-server-ha.ocf +++ b/scripts/rabbitmq-server-ha.ocf @@ -1591,6 +1591,10 @@ get_monitor() { fi fi + if ! 
is-cluster-status-ok ; then + rc=$OCF_ERR_GENERIC + fi + # Check if the list of all queues is available, # Also report some queues stats and total virtual memory. local queues @@ -1630,6 +1634,36 @@ get_monitor() { return $rc } +ocf-update-private-attr() { + local attr_name="${1:?}" + local attr_value="${2:?}" + ocf_run attrd_updater -p --name "$attr_name" --update "$attr_value" +} + +rabbitmqctl-with-timeout-check() { + local command="${1:?}" + local timeout_attr_name="${2:?}" + + su_rabbit_cmd "${OCF_RESKEY_ctl} $command" + local rc=$? + + check_timeouts $rc $timeout_attr_name "$command" + local has_timed_out=$? + + case "$has_timed_out" in + 0) + return $rc;; + 1) + return 0;; + 2) + return 1;; + esac +} + +is-cluster-status-ok() { + local LH="${LH}: is-cluster-status-ok:" + rabbitmqctl-with-timeout-check cluster_status rabbit_cluster_status_timeouts > /dev/null 2>&1 +} action_monitor() { local rc=$OCF_ERR_GENERIC @@ -1670,9 +1704,12 @@ action_start() { return $OCF_SUCCESS fi - ocf_run attrd_updater -p --name 'rabbit_list_channels_timeouts' --update '0' - ocf_run attrd_updater -p --name 'rabbit_get_alarms_timeouts' --update '0' - ocf_run attrd_updater -p --name 'rabbit_list_queues_timeouts' --update '0' + local attrs_to_zero="rabbit_list_channels_timeouts rabbit_get_alarms_timeouts rabbit_list_queues_timeouts rabbit_cluster_status_timeouts" + local attr_name_to_reset + for attr_name_to_reset in $attrs_to_zero; do + ocf-update-private-attr $attr_name_to_reset 0 + done + ocf_log info "${LH} Deleting start time attribute" ocf_run crm_attribute -N $THIS_PCMK_NODE -l reboot --name 'rabbit-start-time' --delete ocf_log info "${LH} Deleting master attribute" From eb60bcfe5dec939d577fbde00b1a140031fb1d03 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Sun, 5 Jun 2016 02:16:15 +0300 Subject: [PATCH 136/174] Brief docs for rabbit_amqqueue_process state record --- src/rabbit_amqqueue_process.erl | 34 ++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 
deletion(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 1a86851d0ac2..844890a52a94 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -33,27 +33,59 @@ prioritise_cast/3, prioritise_info/3, format_message_queue/2]). %% Queue's state --record(q, {q, +-record(q, { + %% an #amqqueue record + q, + %% none | {exclusive consumer channel PID, consumer tag} exclusive_consumer, + %% Set to true if a queue has ever had a consumer. + %% This is used to determine when to delete auto-delete queues. has_had_consumers, + %% backing queue module. + %% for mirrored queues, this will be rabbit_mirror_queue_master. + %% for non-priority and non-mirrored queues, rabbit_variable_queue. + %% see rabbit_backing_queue. backing_queue, + %% backing queue state. + %% see rabbit_backing_queue, rabbit_variable_queue. backing_queue_state, + %% consumers state, see rabbit_queue_consumers consumers, + %% queue expiration value expires, + %% timer used to periodically sync (flush) queue index sync_timer_ref, + %% timer used to update ingress/egress rates and queue RAM duration target rate_timer_ref, + %% timer used to clean up this queue due to TTL (on when unused) expiry_timer_ref, + %% stats emission timer stats_timer, + %% maps message IDs to {channel pid, MsgSeqNo} + %% pairs msg_id_to_channel, + %% message TTL value ttl, + %% timer used to delete expired messages ttl_timer_ref, ttl_timer_expiry, + %% Keeps track of channels that publish to this queue. + %% When channel process goes down, queues have to perform + %% certain cleanup. senders, + %% dead letter exchange as a #resource record, if any dlx, dlx_routing_key, + %% max length in messages, if configured max_length, + %% max length in bytes, if configured max_bytes, + %% used to discard outdated/superceded policy updates, + %% e.g. when policies are applied concurrently. See + %% https://github.com/rabbitmq/rabbitmq-server/issues/803 for one + %% example. 
args_policy_version, + %% running | flow | idle status }). From 4edec0969fa31c26e9c6020ad1bab33a0491f292 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Sun, 5 Jun 2016 02:22:33 +0300 Subject: [PATCH 137/174] Docs correction --- src/rabbit_amqqueue_process.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 844890a52a94..cd21aa62b87f 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -80,10 +80,10 @@ max_length, %% max length in bytes, if configured max_bytes, - %% used to discard outdated/superceded policy updates, - %% e.g. when policies are applied concurrently. See - %% https://github.com/rabbitmq/rabbitmq-server/issues/803 for one - %% example. + %% when policies change, this version helps queue + %% determine what previously scheduled/set up state to ignore, + %% e.g. message expiration messages from previously set up timers + %% that may or may not be still valid args_policy_version, %% running | flow | idle status From ac7225809362f2df541a656bc0611506566e361a Mon Sep 17 00:00:00 2001 From: sylvainhubsch Date: Mon, 18 Apr 2016 14:55:20 -0700 Subject: [PATCH 138/174] Match on types deleted and shortcuts algo --- src/rabbit_exchange_type_headers.erl | 52 ++++++++++++++++++---------- 1 file changed, 34 insertions(+), 18 deletions(-) diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index fe344ba86e35..444d507c7e59 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -85,35 +85,51 @@ headers_match(Args, Data) -> MK = parse_x_match(rabbit_misc:table_lookup(Args, <<"x-match">>)), headers_match(Args, Data, true, false, MK). 
-headers_match([], _Data, AllMatch, _AnyMatch, all) -> - AllMatch; -headers_match([], _Data, _AllMatch, AnyMatch, any) -> - AnyMatch; +% A bit less horrendous algorithm :) +headers_match(_, _, false, _, all) -> false; +headers_match(_, _, _, true, any) -> true; + +% No more bindings, return current state +headers_match([], _Data, AllMatch, _AnyMatch, all) -> AllMatch; +headers_match([], _Data, _AllMatch, AnyMatch, any) -> AnyMatch; + +% Delete bindings starting with x- headers_match([{<<"x-", _/binary>>, _PT, _PV} | PRest], Data, AllMatch, AnyMatch, MatchKind) -> headers_match(PRest, Data, AllMatch, AnyMatch, MatchKind); + +% No more data, but still bindings, false with all headers_match(_Pattern, [], _AllMatch, AnyMatch, MatchKind) -> headers_match([], [], false, AnyMatch, MatchKind); + +% Data key header not in binding, go next data headers_match(Pattern = [{PK, _PT, _PV} | _], [{DK, _DT, _DV} | DRest], AllMatch, AnyMatch, MatchKind) when PK > DK -> headers_match(Pattern, DRest, AllMatch, AnyMatch, MatchKind); + +% Binding key header not in data, false with all, go next binding headers_match([{PK, _PT, _PV} | PRest], Data = [{DK, _DT, _DV} | _], _AllMatch, AnyMatch, MatchKind) when PK < DK -> headers_match(PRest, Data, false, AnyMatch, MatchKind); -headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest], - AllMatch, AnyMatch, MatchKind) when PK == DK -> - {AllMatch1, AnyMatch1} = - case rabbit_misc:type_class(PT) == rabbit_misc:type_class(DT) of - %% It's not properly specified, but a "no value" in a - %% pattern field is supposed to mean simple presence of - %% the corresponding data field. I've interpreted that to - %% mean a type of "void" for the pattern field. - _ when PT == void -> {AllMatch, true}; - false -> {false, AnyMatch}; - _ when PV == DV -> {AllMatch, true}; - _ -> {false, AnyMatch} - end, - headers_match(PRest, DRest, AllMatch1, AnyMatch1, MatchKind). 
+ +%% It's not properly specified, but a "no value" in a +%% pattern field is supposed to mean simple presence of +%% the corresponding data field. I've interpreted that to +%% mean a type of "void" for the pattern field. +headers_match([{PK, void, _PV} | PRest], [{DK, _DT, _DV} | DRest], + AllMatch, _AnyMatch, MatchKind) when PK == DK -> + headers_match(PRest, DRest, AllMatch, true, MatchKind); + +% Complete match, true with any, go next +headers_match([{PK, _PT, PV} | PRest], [{DK, _DT, DV} | DRest], + AllMatch, _AnyMatch, MatchKind) when PK == DK andalso PV == DV -> + headers_match(PRest, DRest, AllMatch, true, MatchKind); + +% Value does not match, false with all, go next +headers_match([{PK, _PT, _PV} | PRest], [{DK, _DT, _DV} | DRest], + _AllMatch, AnyMatch, MatchKind) when PK == DK -> + headers_match(PRest, DRest, false, AnyMatch, MatchKind). + validate(_X) -> ok. create(_Tx, _X) -> ok. From b6db63151efffa7c091702b78aac64f84f4ffcb2 Mon Sep 17 00:00:00 2001 From: sylvainhubsch Date: Tue, 19 Apr 2016 00:46:07 -0700 Subject: [PATCH 139/174] Compare values only on headers match --- src/rabbit_exchange_type_headers.erl | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index 444d507c7e59..c9dedbbe9542 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -111,26 +111,16 @@ headers_match(Pattern = [{PK, _PT, _PV} | _], [{DK, _DT, _DV} | DRest], headers_match([{PK, _PT, _PV} | PRest], Data = [{DK, _DT, _DV} | _], _AllMatch, AnyMatch, MatchKind) when PK < DK -> headers_match(PRest, Data, false, AnyMatch, MatchKind); - -%% It's not properly specified, but a "no value" in a -%% pattern field is supposed to mean simple presence of -%% the corresponding data field. I've interpreted that to -%% mean a type of "void" for the pattern field. 
headers_match([{PK, void, _PV} | PRest], [{DK, _DT, _DV} | DRest], AllMatch, _AnyMatch, MatchKind) when PK == DK -> headers_match(PRest, DRest, AllMatch, true, MatchKind); - -% Complete match, true with any, go next headers_match([{PK, _PT, PV} | PRest], [{DK, _DT, DV} | DRest], AllMatch, _AnyMatch, MatchKind) when PK == DK andalso PV == DV -> headers_match(PRest, DRest, AllMatch, true, MatchKind); - -% Value does not match, false with all, go next headers_match([{PK, _PT, _PV} | PRest], [{DK, _DT, _DV} | DRest], _AllMatch, AnyMatch, MatchKind) when PK == DK -> headers_match(PRest, DRest, false, AnyMatch, MatchKind). - validate(_X) -> ok. create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. From 6853bb46b9516a9a06d764a258510659d02b5ef0 Mon Sep 17 00:00:00 2001 From: sylvainhubsch Date: Tue, 19 Apr 2016 00:52:30 -0700 Subject: [PATCH 140/174] Stop match earlier, and added some comments --- src/rabbit_exchange_type_headers.erl | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index c9dedbbe9542..444d507c7e59 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -111,16 +111,26 @@ headers_match(Pattern = [{PK, _PT, _PV} | _], [{DK, _DT, _DV} | DRest], headers_match([{PK, _PT, _PV} | PRest], Data = [{DK, _DT, _DV} | _], _AllMatch, AnyMatch, MatchKind) when PK < DK -> headers_match(PRest, Data, false, AnyMatch, MatchKind); + +%% It's not properly specified, but a "no value" in a +%% pattern field is supposed to mean simple presence of +%% the corresponding data field. I've interpreted that to +%% mean a type of "void" for the pattern field. 
headers_match([{PK, void, _PV} | PRest], [{DK, _DT, _DV} | DRest], AllMatch, _AnyMatch, MatchKind) when PK == DK -> headers_match(PRest, DRest, AllMatch, true, MatchKind); + +% Complete match, true with any, go next headers_match([{PK, _PT, PV} | PRest], [{DK, _DT, DV} | DRest], AllMatch, _AnyMatch, MatchKind) when PK == DK andalso PV == DV -> headers_match(PRest, DRest, AllMatch, true, MatchKind); + +% Value does not match, false with all, go next headers_match([{PK, _PT, _PV} | PRest], [{DK, _DT, _DV} | DRest], _AllMatch, AnyMatch, MatchKind) when PK == DK -> headers_match(PRest, DRest, false, AnyMatch, MatchKind). + validate(_X) -> ok. create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. From d1dc0edc655963e07a2ab0571a0dd4f458b7c6b3 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Tue, 7 Jun 2016 13:29:20 +0100 Subject: [PATCH 141/174] ETS tables memory consumption info --- src/rabbit_vm.erl | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/src/rabbit_vm.erl b/src/rabbit_vm.erl index 82effb4fc5e0..d67331aec7e2 100644 --- a/src/rabbit_vm.erl +++ b/src/rabbit_vm.erl @@ -16,7 +16,7 @@ -module(rabbit_vm). --export([memory/0, binary/0]). +-export([memory/0, binary/0, ets_tables_memory/1]). -define(MAGIC_PLUGINS, ["mochiweb", "webmachine", "cowboy", "sockjs", "rfc4627_jsonrpc"]). @@ -27,6 +27,9 @@ -spec(memory/0 :: () -> rabbit_types:infos()). -spec(binary/0 :: () -> rabbit_types:infos()). +-spec(ets_tables_memory/1 :: (Owners) -> rabbit_types:infos() + when Owners :: all | OwnerProcessName | [OwnerProcessName], + OwnerProcessName :: atom()). -endif. @@ -118,10 +121,19 @@ mnesia_memory() -> end. ets_memory(OwnerNames) -> + lists:sum([V || {_K, V} <- ets_tables_memory(OwnerNames)]). 
+ +ets_tables_memory(all) -> + [{ets:info(T, name), bytes(ets:info(T, memory))} + || T <- ets:all(), + is_atom(T)]; +ets_tables_memory(OwnerName) when is_atom(OwnerName) -> + ets_tables_memory([OwnerName]); +ets_tables_memory(OwnerNames) when is_list(OwnerNames) -> Owners = [whereis(N) || N <- OwnerNames], - lists:sum([bytes(ets:info(T, memory)) || T <- ets:all(), - O <- [ets:info(T, owner)], - lists:member(O, Owners)]). + [{ets:info(T, name), bytes(ets:info(T, memory))} + || T <- ets:all(), + lists:member(ets:info(T, owner), Owners)]. bytes(Words) -> try Words * erlang:system_info(wordsize) From 7e1f0b0213560837c46f829b2b1cbdeb89f7d46a Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Tue, 7 Jun 2016 13:58:23 +0100 Subject: [PATCH 142/174] Performance improvements on file_handle_cache --- src/file_handle_cache.erl | 99 ++++++++++++++++++++------------------ src/rabbit_msg_store.erl | 16 +++--- src/rabbit_queue_index.erl | 19 +++++--- 3 files changed, 73 insertions(+), 61 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index d5f0cbee6f5b..78b009503672 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -145,7 +145,8 @@ -export([register_callback/3]). -export([open/3, close/1, read/2, append/2, needs_sync/1, sync/1, position/2, truncate/1, current_virtual_offset/1, current_raw_offset/1, flush/1, - copy/3, set_maximum_since_use/1, delete/1, clear/1]). + copy/3, set_maximum_since_use/1, delete/1, clear/1, + open_with_absolute_path/3]). -export([obtain/0, obtain/1, release/0, release/1, transfer/1, transfer/2, set_limit/1, get_limit/0, info_keys/0, with_handle/1, with_handle/2, info/0, info/1, clear_read_cache/0]). @@ -249,6 +250,11 @@ [{'write_buffer', (non_neg_integer() | 'infinity' | 'unbuffered')} | {'read_buffer', (non_neg_integer() | 'unbuffered')}]) -> val_or_error(ref())). 
+-spec(open_with_absolute_path/3 :: + (file:filename(), [any()], + [{'write_buffer', (non_neg_integer() | 'infinity' | 'unbuffered')} | + {'read_buffer', (non_neg_integer() | 'unbuffered')}]) + -> val_or_error(ref())). -spec(close/1 :: (ref()) -> ok_or_error()). -spec(read/2 :: (ref(), non_neg_integer()) -> val_or_error([char()] | binary()) | 'eof'). @@ -300,9 +306,11 @@ register_callback(M, F, A) gen_server2:cast(?SERVER, {register_callback, self(), {M, F, A}}). open(Path, Mode, Options) -> - Path1 = filename:absname(Path), + open_with_absolute_path(filename:absname(Path), Mode, Options). + +open_with_absolute_path(Path, Mode, Options) -> File1 = #file { reader_count = RCount, has_writer = HasWriter } = - case get({Path1, fhc_file}) of + case get({Path, fhc_file}) of File = #file {} -> File; undefined -> #file { reader_count = 0, has_writer = false } @@ -311,7 +319,7 @@ open(Path, Mode, Options) -> IsWriter = is_writer(Mode1), case IsWriter andalso HasWriter of true -> {error, writer_exists}; - false -> {ok, Ref} = new_closed_handle(Path1, Mode1, Options), + false -> {ok, Ref} = new_closed_handle(Path, Mode1, Options), case get_or_reopen([{Ref, new}]) of {ok, [_Handle1]} -> RCount1 = case is_reader(Mode1) of @@ -319,7 +327,7 @@ open(Path, Mode, Options) -> false -> RCount end, HasWriter1 = HasWriter orelse IsWriter, - put({Path1, fhc_file}, + put({Path, fhc_file}, File1 #file { reader_count = RCount1, has_writer = HasWriter1 }), {ok, Ref}; @@ -375,7 +383,7 @@ read(Ref, Count) -> offset = Offset} = tune_read_buffer_limit(Handle0, Count), WantedCount = Count - BufRem, - case prim_file_read(Hdl, lists:max([BufSz, WantedCount])) of + case prim_file_read(Hdl, max(BufSz, WantedCount)) of {ok, Data} -> <<_:BufPos/binary, BufTl/binary>> = Buf, ReadCount = size(Data), @@ -1297,11 +1305,6 @@ pending_out({N, Queue}) -> pending_count({Count, _Queue}) -> Count. -pending_is_empty({0, _Queue}) -> - true; -pending_is_empty({_N, _Queue}) -> - false. 
- %%---------------------------------------------------------------------------- %% server helpers %%---------------------------------------------------------------------------- @@ -1348,17 +1351,24 @@ process_open(State = #fhc_state { limit = Limit, {Pending1, State1} = process_pending(Pending, Limit - used(State), State), State1 #fhc_state { open_pending = Pending1 }. -process_obtain(Type, State = #fhc_state { limit = Limit, - obtain_limit = ObtainLimit }) -> - ObtainCount = obtain_state(Type, count, State), - Pending = obtain_state(Type, pending, State), - Quota = case Type of - file -> Limit - (used(State)); - socket -> lists:min([ObtainLimit - ObtainCount, - Limit - (used(State))]) - end, +process_obtain(socket, State = #fhc_state { limit = Limit, + obtain_limit = ObtainLimit, + open_count = OpenCount, + obtain_count_socket = ObtainCount, + obtain_pending_socket = Pending, + obtain_count_file = ObtainCountF}) -> + Quota = min(ObtainLimit - ObtainCount, + Limit - (OpenCount + ObtainCount + ObtainCountF)), {Pending1, State1} = process_pending(Pending, Quota, State), - set_obtain_state(Type, pending, Pending1, State1). + State1#fhc_state{obtain_pending_socket = Pending1}; +process_obtain(file, State = #fhc_state { limit = Limit, + open_count = OpenCount, + obtain_count_socket = ObtainCountS, + obtain_count_file = ObtainCountF, + obtain_pending_file = Pending}) -> + Quota = Limit - (OpenCount + ObtainCountS + ObtainCountF), + {Pending1, State1} = process_pending(Pending, Quota, State), + State1#fhc_state{obtain_pending_file = Pending1}. process_pending(Pending, Quota, State) when Quota =< 0 -> {Pending, State}; @@ -1383,26 +1393,21 @@ run_pending_item(#pending { kind = Kind, true = ets:update_element(Clients, Pid, {#cstate.blocked, false}), update_counts(Kind, Pid, Requested, State). 
-update_counts(Kind, Pid, Delta, +update_counts(open, Pid, Delta, State = #fhc_state { open_count = OpenCount, - obtain_count_file = ObtainCountF, - obtain_count_socket = ObtainCountS, clients = Clients }) -> - {OpenDelta, ObtainDeltaF, ObtainDeltaS} = - update_counts1(Kind, Pid, Delta, Clients), - State #fhc_state { open_count = OpenCount + OpenDelta, - obtain_count_file = ObtainCountF + ObtainDeltaF, - obtain_count_socket = ObtainCountS + ObtainDeltaS }. - -update_counts1(open, Pid, Delta, Clients) -> ets:update_counter(Clients, Pid, {#cstate.opened, Delta}), - {Delta, 0, 0}; -update_counts1({obtain, file}, Pid, Delta, Clients) -> + State #fhc_state { open_count = OpenCount + Delta}; +update_counts({obtain, file}, Pid, Delta, + State = #fhc_state {obtain_count_file = ObtainCountF, + clients = Clients }) -> ets:update_counter(Clients, Pid, {#cstate.obtained_file, Delta}), - {0, Delta, 0}; -update_counts1({obtain, socket}, Pid, Delta, Clients) -> + State #fhc_state { obtain_count_file = ObtainCountF + Delta}; +update_counts({obtain, socket}, Pid, Delta, + State = #fhc_state {obtain_count_socket = ObtainCountS, + clients = Clients }) -> ets:update_counter(Clients, Pid, {#cstate.obtained_socket, Delta}), - {0, 0, Delta}. + State #fhc_state { obtain_count_socket = ObtainCountS + Delta}. maybe_reduce(State) -> case needs_reduce(State) of @@ -1410,18 +1415,20 @@ maybe_reduce(State) -> false -> State end. 
-needs_reduce(State = #fhc_state { limit = Limit, - open_pending = OpenPending, - obtain_limit = ObtainLimit, - obtain_count_socket = ObtainCountS, - obtain_pending_file = ObtainPendingF, - obtain_pending_socket = ObtainPendingS }) -> +needs_reduce(#fhc_state { limit = Limit, + open_count = OpenCount, + open_pending = {OpenPending, _}, + obtain_limit = ObtainLimit, + obtain_count_socket = ObtainCountS, + obtain_count_file = ObtainCountF, + obtain_pending_file = {ObtainPendingF, _}, + obtain_pending_socket = {ObtainPendingS, _} }) -> Limit =/= infinity - andalso ((used(State) > Limit) - orelse (not pending_is_empty(OpenPending)) - orelse (not pending_is_empty(ObtainPendingF)) + andalso (((OpenCount + ObtainCountS + ObtainCountF) > Limit) + orelse (OpenPending =/= 0) + orelse (ObtainPendingF =/= 0) orelse (ObtainCountS < ObtainLimit - andalso not pending_is_empty(ObtainPendingS))). + andalso (ObtainPendingS =/= 0))). reduce(State = #fhc_state { open_pending = OpenPending, obtain_pending_file = ObtainPendingFile, diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 6754c606bbf4..230066468734 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1360,9 +1360,10 @@ should_mask_action(CRef, MsgId, %%---------------------------------------------------------------------------- open_file(Dir, FileName, Mode) -> - file_handle_cache:open(form_filename(Dir, FileName), ?BINARY_MODE ++ Mode, - [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}, - {read_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]). + file_handle_cache:open_with_absolute_path( + form_filename(Dir, FileName), ?BINARY_MODE ++ Mode, + [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}, + {read_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]). 
close_handle(Key, CState = #client_msstate { file_handle_cache = FHC }) -> CState #client_msstate { file_handle_cache = close_handle(Key, FHC) }; @@ -2112,10 +2113,11 @@ transform_dir(BaseDir, Store, TransformFun) -> transform_msg_file(FileOld, FileNew, TransformFun) -> ok = rabbit_file:ensure_parent_dirs_exist(FileNew), - {ok, RefOld} = file_handle_cache:open(FileOld, [raw, binary, read], []), - {ok, RefNew} = file_handle_cache:open(FileNew, [raw, binary, write], - [{write_buffer, - ?HANDLE_CACHE_BUFFER_SIZE}]), + {ok, RefOld} = file_handle_cache:open_with_absolute_path( + FileOld, [raw, binary, read], []), + {ok, RefNew} = file_handle_cache:open_with_absolute_path( + FileNew, [raw, binary, write], + [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]), {ok, _Acc, _IgnoreSize} = rabbit_msg_file:scan( RefOld, filelib:file_size(FileOld), diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 981d8e74ff00..06b6961edb8e 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -816,8 +816,9 @@ append_journal_to_segment(#segment { journal_entries = JEntries, _ -> file_handle_cache_stats:update(queue_index_write), - {ok, Hdl} = file_handle_cache:open(Path, ?WRITE_MODE, - [{write_buffer, infinity}]), + {ok, Hdl} = file_handle_cache:open_with_absolute_path( + Path, ?WRITE_MODE, + [{write_buffer, infinity}]), %% the file_handle_cache also does a list reverse, so this %% might not be required here, but before we were doing a %% sparse_foldr, a lists:reverse/1 seems to be the correct @@ -832,8 +833,8 @@ get_journal_handle(State = #qistate { journal_handle = undefined, dir = Dir }) -> Path = filename:join(Dir, ?JOURNAL_FILENAME), ok = rabbit_file:ensure_dir(Path), - {ok, Hdl} = file_handle_cache:open(Path, ?WRITE_MODE, - [{write_buffer, infinity}]), + {ok, Hdl} = file_handle_cache:open_with_absolute_path( + Path, ?WRITE_MODE, [{write_buffer, infinity}]), {Hdl, State #qistate { journal_handle = Hdl }}; get_journal_handle(State = #qistate { 
journal_handle = Hdl }) -> {Hdl, State}. @@ -1058,7 +1059,8 @@ load_segment(KeepAcked, #segment { path = Path }) -> false -> Empty; true -> Size = rabbit_file:file_size(Path), file_handle_cache_stats:update(queue_index_read), - {ok, Hdl} = file_handle_cache:open(Path, ?READ_MODE, []), + {ok, Hdl} = file_handle_cache:open_with_absolute_path( + Path, ?READ_MODE, []), {ok, 0} = file_handle_cache:position(Hdl, bof), {ok, SegBin} = file_handle_cache:read(Hdl, Size), ok = file_handle_cache:close(Hdl), @@ -1383,10 +1385,11 @@ transform_file(Path, Fun) when is_function(Fun)-> case rabbit_file:file_size(Path) of 0 -> ok; Size -> {ok, PathTmpHdl} = - file_handle_cache:open(PathTmp, ?WRITE_MODE, - [{write_buffer, infinity}]), + file_handle_cache:open_with_absolute_path( + PathTmp, ?WRITE_MODE, + [{write_buffer, infinity}]), - {ok, PathHdl} = file_handle_cache:open( + {ok, PathHdl} = file_handle_cache:open_with_absolute_path( Path, ?READ_MODE, [{read_buffer, Size}]), {ok, Content} = file_handle_cache:read(PathHdl, Size), ok = file_handle_cache:close(PathHdl), From d59ac4c4700e370a8bb27864fdcd15cc5dcece19 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Wed, 8 Jun 2016 02:54:24 +0300 Subject: [PATCH 143/174] Extract rabbit_mirror_queue_misc:sync_queue/1 and cancel_sync_queue/1 Having these functions outside of rabbit_control_main makes it possible to use them in other tools, e.g. an alternative CLI implementation. --- src/rabbit_control_main.erl | 8 ++------ src/rabbit_mirror_queue_misc.erl | 12 ++++++++++++ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/src/rabbit_control_main.erl b/src/rabbit_control_main.erl index 2df4fd96c0c7..271ca4484e34 100644 --- a/src/rabbit_control_main.erl +++ b/src/rabbit_control_main.erl @@ -675,14 +675,10 @@ action(list_consumers, Node, _Args, Opts, Inform, Timeout) -> format_parse_error({_Line, Mod, Err}) -> lists:flatten(Mod:format_error(Err)). 
sync_queue(Q) -> - rabbit_amqqueue:with( - Q, fun(#amqqueue{pid = QPid}) -> rabbit_amqqueue:sync_mirrors(QPid) end). + rabbit_mirror_queue_misc:sync_queue(Q). cancel_sync_queue(Q) -> - rabbit_amqqueue:with( - Q, fun(#amqqueue{pid = QPid}) -> - rabbit_amqqueue:cancel_sync_mirrors(QPid) - end). + rabbit_mirror_queue_misc:cancel_sync_queue(Q). purge_queue(Q) -> rabbit_amqqueue:with( diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl index 849efa361174..fad20711aa51 100644 --- a/src/rabbit_mirror_queue_misc.erl +++ b/src/rabbit_mirror_queue_misc.erl @@ -24,6 +24,8 @@ maybe_auto_sync/1, maybe_drop_master_after_sync/1, sync_batch_size/1, log_info/3, log_warning/3]). +-export([sync_queue/1, cancel_sync_queue/1]). + %% for testing only -export([module/1]). @@ -364,6 +366,16 @@ maybe_auto_sync(Q = #amqqueue{pid = QPid}) -> ok end. +sync_queue(Q) -> + rabbit_amqqueue:with( + Q, fun(#amqqueue{pid = QPid}) -> rabbit_amqqueue:sync_mirrors(QPid) end). + +cancel_sync_queue(Q) -> + rabbit_amqqueue:with( + Q, fun(#amqqueue{pid = QPid}) -> + rabbit_amqqueue:cancel_sync_mirrors(QPid) + end). + sync_batch_size(#amqqueue{} = Q) -> case policy(<<"ha-sync-batch-size">>, Q) of none -> %% we need this case because none > 1 == true From cef24b75d605eae79137b2cdf7b15b373c091ba5 Mon Sep 17 00:00:00 2001 From: Alexey Lebedeff Date: Wed, 8 Jun 2016 17:42:26 +0300 Subject: [PATCH 144/174] Fix bashisms in OCF HA script `-` is not allowed in function names by POSIX, and some shells (e.g. `dash`) will consider this as a syntax error. --- scripts/rabbitmq-server-ha.ocf | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/scripts/rabbitmq-server-ha.ocf b/scripts/rabbitmq-server-ha.ocf index 6c1e52251e7a..cd07d0c1b0d3 100755 --- a/scripts/rabbitmq-server-ha.ocf +++ b/scripts/rabbitmq-server-ha.ocf @@ -1591,7 +1591,7 @@ get_monitor() { fi fi - if ! is-cluster-status-ok ; then + if ! 
is_cluster_status_ok ; then rc=$OCF_ERR_GENERIC fi @@ -1634,13 +1634,13 @@ get_monitor() { return $rc } -ocf-update-private-attr() { +ocf_update_private_attr() { local attr_name="${1:?}" local attr_value="${2:?}" ocf_run attrd_updater -p --name "$attr_name" --update "$attr_value" } -rabbitmqctl-with-timeout-check() { +rabbitmqctl_with_timeout_check() { local command="${1:?}" local timeout_attr_name="${2:?}" @@ -1660,9 +1660,9 @@ rabbitmqctl-with-timeout-check() { esac } -is-cluster-status-ok() { - local LH="${LH}: is-cluster-status-ok:" - rabbitmqctl-with-timeout-check cluster_status rabbit_cluster_status_timeouts > /dev/null 2>&1 +is_cluster_status_ok() { + local LH="${LH}: is_cluster_status_ok:" + rabbitmqctl_with_timeout_check cluster_status rabbit_cluster_status_timeouts > /dev/null 2>&1 } action_monitor() { @@ -1707,7 +1707,7 @@ action_start() { local attrs_to_zero="rabbit_list_channels_timeouts rabbit_get_alarms_timeouts rabbit_list_queues_timeouts rabbit_cluster_status_timeouts" local attr_name_to_reset for attr_name_to_reset in $attrs_to_zero; do - ocf-update-private-attr $attr_name_to_reset 0 + ocf_update_private_attr $attr_name_to_reset 0 done ocf_log info "${LH} Deleting start time attribute" From 8c0700e809b9a1efcbb9a8394a9b4241b7c3f415 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Thu, 9 Jun 2016 15:48:11 +0300 Subject: [PATCH 145/174] Correct doc typo --- src/worker_pool.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/worker_pool.erl b/src/worker_pool.erl index 082e92446bda..6bacd43d273d 100644 --- a/src/worker_pool.erl +++ b/src/worker_pool.erl @@ -18,8 +18,8 @@ %% Generic worker pool manager. %% -%% Submitted jobs are functions. They can be executed asynchronously -%% (using worker_pool:submit/1, worker_pool:submit/2) or synchronously +%% Submitted jobs are functions. They can be executed synchronously +%% (using worker_pool:submit/1, worker_pool:submit/2) or asynchronously %% (using worker_pool:submit_async/1). 
%% %% We typically use the worker pool if we want to limit the maximum From ae501fde2f3e8bea5a4fbd53979027cecc6b9a77 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Thu, 9 Jun 2016 17:03:39 +0300 Subject: [PATCH 146/174] Significantly bump max restarts intensity for worker_pool_sup, fixes #834 --- src/worker_pool_sup.erl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/worker_pool_sup.erl b/src/worker_pool_sup.erl index d846c262466a..0fb8d0176b85 100644 --- a/src/worker_pool_sup.erl +++ b/src/worker_pool_sup.erl @@ -48,7 +48,11 @@ start_link(WCount, PoolName) -> %%---------------------------------------------------------------------------- init([WCount, PoolName]) -> - {ok, {{one_for_one, 10, 10}, + %% we want to survive up to 1K of worker restarts per second, + %% e.g. when a large worker pool used for network connections + %% encounters a network failure. This is the case in the LDAP authentication + %% backend plugin. + {ok, {{one_for_one, 10 * 1000, 10}, [{worker_pool, {worker_pool, start_link, [PoolName]}, transient, 16#ffffffff, worker, [worker_pool]} | [{N, {worker_pool_worker, start_link, [PoolName]}, transient, From 765b9c4aaeae2875c176516e38a6d47ff2affc19 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Thu, 9 Jun 2016 17:42:04 +0300 Subject: [PATCH 147/174] Simplify --- src/worker_pool_sup.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/worker_pool_sup.erl b/src/worker_pool_sup.erl index 0fb8d0176b85..3b2c3476c254 100644 --- a/src/worker_pool_sup.erl +++ b/src/worker_pool_sup.erl @@ -52,7 +52,7 @@ init([WCount, PoolName]) -> %% e.g. when a large worker pool used for network connections %% encounters a network failure. This is the case in the LDAP authentication %% backend plugin. 
- {ok, {{one_for_one, 10 * 1000, 10}, + {ok, {{one_for_one, 1000, 1}, [{worker_pool, {worker_pool, start_link, [PoolName]}, transient, 16#ffffffff, worker, [worker_pool]} | [{N, {worker_pool_worker, start_link, [PoolName]}, transient, From 6b7fc63a5530e5efcd8001aa50a366ab9b23d780 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Mon, 13 Jun 2016 18:18:20 +0100 Subject: [PATCH 148/174] Move cleanup to prep_stop --- src/rabbit.erl | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index a1a70e5ee973..b7fe0fb455bb 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -22,7 +22,7 @@ stop_and_halt/0, await_startup/0, status/0, is_running/0, is_running/1, environment/0, rotate_logs/1, force_event_refresh/1, start_fhc/0]). --export([start/2, stop/1]). +-export([start/2, stop/1, prep_stop/1]). -export([start_apps/1, stop_apps/1]). -export([log_location/1, config_files/0]). %% for testing and mgmt-agent @@ -196,6 +196,8 @@ -define(ASYNC_THREADS_WARNING_THRESHOLD, 8). +-define(LEAVE_CLUSTER_TIMEOUT, 15000). + %%---------------------------------------------------------------------------- -ifdef(use_specs). @@ -590,14 +592,40 @@ start(normal, []) -> Error end. -stop(_State) -> +prep_stop(_State) -> ok = rabbit_alarm:stop(), ok = case rabbit_mnesia:is_clustered() of - true -> rabbit_amqqueue:on_node_down(node()); + true -> + %% We are starting on_node_down in separate process to + %% avoid blocking application_master and application_controller + %% This process will be executed normally whithin Timeout or + %% will be considered deadlocked and killed by exit signal + run_in_process_with_timeout( + fun() -> rabbit_amqqueue:on_node_down(node()) end, + ?LEAVE_CLUSTER_TIMEOUT), + ok; false -> rabbit_table:clear_ram_only_tables() end, ok. +stop(_) -> ok. 
+ +run_in_process_with_timeout(Fun, Timeout) -> + Self = self(), + Ref = make_ref(), + Worker = spawn_link(fun () -> + put(worker_pool_worker, true), + Self ! {Ref, Fun()}, + unlink(Self) + end), + receive + {Ref, Res} -> Res + after Timeout -> + unlink(Worker), + exit(Worker, kill), + killed + end. + -ifdef(use_specs). -spec(boot_error/2 :: (term(), not_available | [tuple()]) -> no_return()). -endif. From b5ad8e6725a810b0065b3135c5f6b6e0b769d211 Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Tue, 14 Jun 2016 07:40:04 +0100 Subject: [PATCH 149/174] Restore original order in batch publish messages * During the synchronisation of slaves the message's order was reverted. Then, acks could not be processed as those msgs were not yet dropped. --- src/rabbit_priority_queue.erl | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/rabbit_priority_queue.erl b/src/rabbit_priority_queue.erl index a3bfb5cdfa40..ae8a38daf093 100644 --- a/src/rabbit_priority_queue.erl +++ b/src/rabbit_priority_queue.erl @@ -591,10 +591,15 @@ partition_publish_delivered_batch(Publishes, MaxP) -> Publishes, fun ({Msg, _}) -> Msg end, MaxP). partition_publishes(Publishes, ExtractMsg, MaxP) -> - lists:foldl(fun (Pub, Dict) -> - Msg = ExtractMsg(Pub), - rabbit_misc:orddict_cons(priority(Msg, MaxP), Pub, Dict) - end, orddict:new(), Publishes). + Partitioned = + lists:foldl(fun (Pub, Dict) -> + Msg = ExtractMsg(Pub), + rabbit_misc:orddict_cons(priority(Msg, MaxP), Pub, Dict) + end, orddict:new(), Publishes), + orddict:map(fun (_P, RevPubs) -> + lists:reverse(RevPubs) + end, Partitioned). + priority_bq(Priority, [{MaxP, _} | _] = BQSs) -> bq_fetch(priority(Priority, MaxP), BQSs). 
From 8fc07bedef4a0d0409696107d415f55846b2ca62 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Tue, 14 Jun 2016 11:09:16 +0100 Subject: [PATCH 150/174] Do not run on_node_down --- src/rabbit.erl | 26 +------------------------- 1 file changed, 1 insertion(+), 25 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index b7fe0fb455bb..84f979e9ad8b 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -595,37 +595,13 @@ start(normal, []) -> prep_stop(_State) -> ok = rabbit_alarm:stop(), ok = case rabbit_mnesia:is_clustered() of - true -> - %% We are starting on_node_down in separate process to - %% avoid blocking application_master and application_controller - %% This process will be executed normally whithin Timeout or - %% will be considered deadlocked and killed by exit signal - run_in_process_with_timeout( - fun() -> rabbit_amqqueue:on_node_down(node()) end, - ?LEAVE_CLUSTER_TIMEOUT), - ok; + true -> ok; false -> rabbit_table:clear_ram_only_tables() end, ok. stop(_) -> ok. -run_in_process_with_timeout(Fun, Timeout) -> - Self = self(), - Ref = make_ref(), - Worker = spawn_link(fun () -> - put(worker_pool_worker, true), - Self ! {Ref, Fun()}, - unlink(Self) - end), - receive - {Ref, Res} -> Res - after Timeout -> - unlink(Worker), - exit(Worker, kill), - killed - end. - -ifdef(use_specs). -spec(boot_error/2 :: (term(), not_available | [tuple()]) -> no_return()). -endif. 
From 55acdb57727cc2cda19972269693d4c9133d8810 Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Tue, 14 Jun 2016 12:29:56 +0100 Subject: [PATCH 151/174] Add tests for order of messages within each priority queue --- test/priority_queue_SUITE.erl | 46 ++++++++++++++++++++++++++++++++--- 1 file changed, 43 insertions(+), 3 deletions(-) diff --git a/test/priority_queue_SUITE.erl b/test/priority_queue_SUITE.erl index 5df568609079..3e94e5bf02be 100644 --- a/test/priority_queue_SUITE.erl +++ b/test/priority_queue_SUITE.erl @@ -39,6 +39,7 @@ groups() -> mirror_queue_sync, mirror_queue_sync_priority_above_max, mirror_queue_sync_priority_above_max_pending_ack, + mirror_queue_sync_order, purge, requeue, resume, @@ -432,6 +433,33 @@ mirror_queue_auto_ack(Config) -> delete(Ch, Q), passed. +mirror_queue_sync_order(Config) -> + A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, A), + Ch2 = rabbit_ct_client_helpers:open_channel(Config, B), + Q = <<"mirror_queue_sync_order-queue">>, + declare(Ch, Q, 3), + publish_payload(Ch, Q, [{1, <<"msg1">>}, {2, <<"msg2">>}, + {2, <<"msg3">>}, {2, <<"msg4">>}, + {3, <<"msg5">>}]), + rabbit_ct_client_helpers:close_channel(Ch), + + %% Add and sync slave + ok = rabbit_ct_broker_helpers:set_ha_policy( + Config, A, <<"^mirror_queue_sync_order-queue$">>, <<"all">>), + rabbit_ct_broker_helpers:control_action(sync_queue, A, + [binary_to_list(Q)], [{"-p", "/"}]), + wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q)), + + %% Stop the master + rabbit_ct_broker_helpers:stop_node(Config, A), + + get_payload(Ch2, Q, do_ack, [<<"msg5">>, <<"msg2">>, <<"msg3">>, + <<"msg4">>, <<"msg1">>]), + + delete(Ch2, Q), + passed. 
%%---------------------------------------------------------------------------- open(Config) -> @@ -454,6 +482,11 @@ publish(Ch, Q, Ps) -> [publish1(Ch, Q, P) || P <- Ps], amqp_channel:wait_for_confirms(Ch). +publish_payload(Ch, Q, PPds) -> + amqp_channel:call(Ch, #'confirm.select'{}), + [publish1(Ch, Q, P, Pd) || {P, Pd} <- PPds], + amqp_channel:wait_for_confirms(Ch). + publish_many(_Ch, _Q, 0) -> ok; publish_many( Ch, Q, N) -> publish1(Ch, Q, random:uniform(5)), publish_many(Ch, Q, N - 1). @@ -463,6 +496,11 @@ publish1(Ch, Q, P) -> #amqp_msg{props = props(P), payload = priority2bin(P)}). +publish1(Ch, Q, P, Pd) -> + amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q}, + #amqp_msg{props = props(P), + payload = Pd}). + props(undefined) -> #'P_basic'{delivery_mode = 2}; props(P) -> #'P_basic'{priority = P, delivery_mode = 2}. @@ -494,19 +532,21 @@ get_all(Ch, Q, Ack, Ps) -> DTags. get_partial(Ch, Q, Ack, Ps) -> - [get_ok(Ch, Q, Ack, P) || P <- Ps]. + [get_ok(Ch, Q, Ack, priority2bin(P)) || P <- Ps]. get_empty(Ch, Q) -> #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = Q}). -get_ok(Ch, Q, Ack, P) -> - PBin = priority2bin(P), +get_ok(Ch, Q, Ack, PBin) -> {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} = amqp_channel:call(Ch, #'basic.get'{queue = Q, no_ack = Ack =:= no_ack}), PBin = PBin2, maybe_ack(Ch, Ack, DTag). +get_payload(Ch, Q, Ack, Ps) -> + [get_ok(Ch, Q, Ack, P) || P <- Ps]. + get_without_ack(Ch, Q) -> {#'basic.get_ok'{}, _} = amqp_channel:call(Ch, #'basic.get'{queue = Q, no_ack = false}). From c1b917cc1857cb3b380bf8079e9295551a04b2b9 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Tue, 14 Jun 2016 14:07:49 +0100 Subject: [PATCH 152/174] Unused const --- src/rabbit.erl | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 84f979e9ad8b..06c84b53b48f 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -196,8 +196,6 @@ -define(ASYNC_THREADS_WARNING_THRESHOLD, 8). 
--define(LEAVE_CLUSTER_TIMEOUT, 15000). - %%---------------------------------------------------------------------------- -ifdef(use_specs). From 089645318fbe56394935466537e7cf9c77809f21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Wed, 15 Jun 2016 17:39:13 +0200 Subject: [PATCH 153/174] priority_queue_SUITE: Use the new open_connection_and_channel() helper --- test/priority_queue_SUITE.erl | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/priority_queue_SUITE.erl b/test/priority_queue_SUITE.erl index 3e94e5bf02be..56b44d423eb5 100644 --- a/test/priority_queue_SUITE.erl +++ b/test/priority_queue_SUITE.erl @@ -463,9 +463,7 @@ mirror_queue_sync_order(Config) -> %%---------------------------------------------------------------------------- open(Config) -> - Conn = rabbit_ct_client_helpers:open_connection(Config, 0), - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), - {Conn, Ch}. + rabbit_ct_client_helpers:open_connection_and_channel(Config, 0). 
declare(Ch, Q, Args) when is_list(Args) -> amqp_channel:call(Ch, #'queue.declare'{queue = Q, From 6bceb552a762fc0473624e62fe4af3e6e0317deb Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Thu, 16 Jun 2016 13:12:36 +0100 Subject: [PATCH 154/174] Set bounded timeout for OTP workers --- src/rabbit_amqqueue_sup.erl | 2 +- src/rabbit_amqqueue_sup_sup.erl | 2 +- src/rabbit_client_sup.erl | 2 +- src/rabbit_connection_helper_sup.erl | 2 +- src/rabbit_connection_sup.erl | 2 +- src/rabbit_restartable_sup.erl | 2 +- src/rabbit_sup.erl | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/rabbit_amqqueue_sup.erl b/src/rabbit_amqqueue_sup.erl index 6cc4f45b147a..e46ae0f508c6 100644 --- a/src/rabbit_amqqueue_sup.erl +++ b/src/rabbit_amqqueue_sup.erl @@ -39,7 +39,7 @@ start_link(Q, StartMode) -> Marker = spawn_link(fun() -> receive stop -> ok end end), ChildSpec = {rabbit_amqqueue, {rabbit_prequeue, start_link, [Q, StartMode, Marker]}, - intrinsic, ?MAX_WAIT, worker, [rabbit_amqqueue_process, + intrinsic, ?WORKER_WAIT, worker, [rabbit_amqqueue_process, rabbit_mirror_queue_slave]}, {ok, SupPid} = supervisor2:start_link(?MODULE, []), {ok, QPid} = supervisor2:start_child(SupPid, ChildSpec), diff --git a/src/rabbit_amqqueue_sup_sup.erl b/src/rabbit_amqqueue_sup_sup.erl index bb89eace7873..68aabbfe28a2 100644 --- a/src/rabbit_amqqueue_sup_sup.erl +++ b/src/rabbit_amqqueue_sup_sup.erl @@ -49,4 +49,4 @@ start_queue_process(Node, Q, StartMode) -> init([]) -> {ok, {{simple_one_for_one, 10, 10}, [{rabbit_amqqueue_sup, {rabbit_amqqueue_sup, start_link, []}, - temporary, ?MAX_WAIT, supervisor, [rabbit_amqqueue_sup]}]}}. + temporary, ?SUPERVISOR_WAIT, supervisor, [rabbit_amqqueue_sup]}]}}. 
diff --git a/src/rabbit_client_sup.erl b/src/rabbit_client_sup.erl index 5ca0cad5ae7c..3779fd1dc46b 100644 --- a/src/rabbit_client_sup.erl +++ b/src/rabbit_client_sup.erl @@ -53,5 +53,5 @@ init({M,F,A}) -> [{client, {M,F,A}, temporary, infinity, supervisor, [M]}]}}; init({{M,F,A}, worker}) -> {ok, {{simple_one_for_one, 0, 1}, - [{client, {M,F,A}, temporary, ?MAX_WAIT, worker, [M]}]}}. + [{client, {M,F,A}, temporary, ?WORKER_WAIT, worker, [M]}]}}. diff --git a/src/rabbit_connection_helper_sup.erl b/src/rabbit_connection_helper_sup.erl index d89bc3d75398..14374ac169cb 100644 --- a/src/rabbit_connection_helper_sup.erl +++ b/src/rabbit_connection_helper_sup.erl @@ -59,7 +59,7 @@ start_queue_collector(SupPid, Identity) -> supervisor2:start_child( SupPid, {collector, {rabbit_queue_collector, start_link, [Identity]}, - intrinsic, ?MAX_WAIT, worker, [rabbit_queue_collector]}). + intrinsic, ?WORKER_WAIT, worker, [rabbit_queue_collector]}). %%---------------------------------------------------------------------------- diff --git a/src/rabbit_connection_sup.erl b/src/rabbit_connection_sup.erl index bacdf3992642..161f5bfe0613 100644 --- a/src/rabbit_connection_sup.erl +++ b/src/rabbit_connection_sup.erl @@ -66,7 +66,7 @@ start_link(Ref, Sock, _Transport, _Opts) -> supervisor2:start_child( SupPid, {reader, {rabbit_reader, start_link, [HelperSup, Ref, Sock]}, - intrinsic, ?MAX_WAIT, worker, [rabbit_reader]}), + intrinsic, ?WORKER_WAIT, worker, [rabbit_reader]}), {ok, SupPid, ReaderPid}. reader(Pid) -> diff --git a/src/rabbit_restartable_sup.erl b/src/rabbit_restartable_sup.erl index ed35556f30e5..8517718add54 100644 --- a/src/rabbit_restartable_sup.erl +++ b/src/rabbit_restartable_sup.erl @@ -45,4 +45,4 @@ init([{Mod, _F, _A} = Fun, Delay]) -> [{Mod, Fun, case Delay of true -> {transient, 1}; false -> transient - end, ?MAX_WAIT, worker, [Mod]}]}}. + end, ?WORKER_WAIT, worker, [Mod]}]}}. 
diff --git a/src/rabbit_sup.erl b/src/rabbit_sup.erl index 501623d96b84..b33b35c7905a 100644 --- a/src/rabbit_sup.erl +++ b/src/rabbit_sup.erl @@ -62,7 +62,7 @@ start_child(ChildId, Mod, Args) -> child_reply(supervisor:start_child( ?SERVER, {ChildId, {Mod, start_link, Args}, - transient, ?MAX_WAIT, worker, [Mod]})). + transient, ?WORKER_WAIT, worker, [Mod]})). start_supervisor_child(Mod) -> start_supervisor_child(Mod, []). From 95e11692fea19715fd0ce93573a4f07dd21468f9 Mon Sep 17 00:00:00 2001 From: Daniil Fedotov Date: Thu, 16 Jun 2016 13:12:36 +0100 Subject: [PATCH 155/174] Set bounded timeout for OTP workers --- src/rabbit_amqqueue_sup.erl | 2 +- src/rabbit_amqqueue_sup_sup.erl | 2 +- src/rabbit_client_sup.erl | 2 +- src/rabbit_connection_helper_sup.erl | 2 +- src/rabbit_connection_sup.erl | 2 +- src/rabbit_restartable_sup.erl | 2 +- src/rabbit_sup.erl | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/rabbit_amqqueue_sup.erl b/src/rabbit_amqqueue_sup.erl index 6cc4f45b147a..e46ae0f508c6 100644 --- a/src/rabbit_amqqueue_sup.erl +++ b/src/rabbit_amqqueue_sup.erl @@ -39,7 +39,7 @@ start_link(Q, StartMode) -> Marker = spawn_link(fun() -> receive stop -> ok end end), ChildSpec = {rabbit_amqqueue, {rabbit_prequeue, start_link, [Q, StartMode, Marker]}, - intrinsic, ?MAX_WAIT, worker, [rabbit_amqqueue_process, + intrinsic, ?WORKER_WAIT, worker, [rabbit_amqqueue_process, rabbit_mirror_queue_slave]}, {ok, SupPid} = supervisor2:start_link(?MODULE, []), {ok, QPid} = supervisor2:start_child(SupPid, ChildSpec), diff --git a/src/rabbit_amqqueue_sup_sup.erl b/src/rabbit_amqqueue_sup_sup.erl index bb89eace7873..68aabbfe28a2 100644 --- a/src/rabbit_amqqueue_sup_sup.erl +++ b/src/rabbit_amqqueue_sup_sup.erl @@ -49,4 +49,4 @@ start_queue_process(Node, Q, StartMode) -> init([]) -> {ok, {{simple_one_for_one, 10, 10}, [{rabbit_amqqueue_sup, {rabbit_amqqueue_sup, start_link, []}, - temporary, ?MAX_WAIT, supervisor, [rabbit_amqqueue_sup]}]}}. 
+ temporary, ?SUPERVISOR_WAIT, supervisor, [rabbit_amqqueue_sup]}]}}. diff --git a/src/rabbit_client_sup.erl b/src/rabbit_client_sup.erl index 5ca0cad5ae7c..3779fd1dc46b 100644 --- a/src/rabbit_client_sup.erl +++ b/src/rabbit_client_sup.erl @@ -53,5 +53,5 @@ init({M,F,A}) -> [{client, {M,F,A}, temporary, infinity, supervisor, [M]}]}}; init({{M,F,A}, worker}) -> {ok, {{simple_one_for_one, 0, 1}, - [{client, {M,F,A}, temporary, ?MAX_WAIT, worker, [M]}]}}. + [{client, {M,F,A}, temporary, ?WORKER_WAIT, worker, [M]}]}}. diff --git a/src/rabbit_connection_helper_sup.erl b/src/rabbit_connection_helper_sup.erl index d89bc3d75398..14374ac169cb 100644 --- a/src/rabbit_connection_helper_sup.erl +++ b/src/rabbit_connection_helper_sup.erl @@ -59,7 +59,7 @@ start_queue_collector(SupPid, Identity) -> supervisor2:start_child( SupPid, {collector, {rabbit_queue_collector, start_link, [Identity]}, - intrinsic, ?MAX_WAIT, worker, [rabbit_queue_collector]}). + intrinsic, ?WORKER_WAIT, worker, [rabbit_queue_collector]}). %%---------------------------------------------------------------------------- diff --git a/src/rabbit_connection_sup.erl b/src/rabbit_connection_sup.erl index bacdf3992642..161f5bfe0613 100644 --- a/src/rabbit_connection_sup.erl +++ b/src/rabbit_connection_sup.erl @@ -66,7 +66,7 @@ start_link(Ref, Sock, _Transport, _Opts) -> supervisor2:start_child( SupPid, {reader, {rabbit_reader, start_link, [HelperSup, Ref, Sock]}, - intrinsic, ?MAX_WAIT, worker, [rabbit_reader]}), + intrinsic, ?WORKER_WAIT, worker, [rabbit_reader]}), {ok, SupPid, ReaderPid}. reader(Pid) -> diff --git a/src/rabbit_restartable_sup.erl b/src/rabbit_restartable_sup.erl index ed35556f30e5..8517718add54 100644 --- a/src/rabbit_restartable_sup.erl +++ b/src/rabbit_restartable_sup.erl @@ -45,4 +45,4 @@ init([{Mod, _F, _A} = Fun, Delay]) -> [{Mod, Fun, case Delay of true -> {transient, 1}; false -> transient - end, ?MAX_WAIT, worker, [Mod]}]}}. + end, ?WORKER_WAIT, worker, [Mod]}]}}. 
diff --git a/src/rabbit_sup.erl b/src/rabbit_sup.erl index 501623d96b84..b33b35c7905a 100644 --- a/src/rabbit_sup.erl +++ b/src/rabbit_sup.erl @@ -62,7 +62,7 @@ start_child(ChildId, Mod, Args) -> child_reply(supervisor:start_child( ?SERVER, {ChildId, {Mod, start_link, Args}, - transient, ?MAX_WAIT, worker, [Mod]})). + transient, ?WORKER_WAIT, worker, [Mod]})). start_supervisor_child(Mod) -> start_supervisor_child(Mod, []). From 81ff300a43640acd7a6c36f2dc31a73f8dc05bb2 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Sat, 18 Jun 2016 23:52:28 +0300 Subject: [PATCH 156/174] Gracefully handle cases when logging exchange does not exist Client test suites that test connection recovery by restarting RabbitMQ occasionally lead to a situation when amq.rabbitmq.log in the "/" vhost can be unavailable for a split second. Default vhost may or may not exist in general. So handle {error, not_found} responses from rabbit_basic:publish/4 instead of potentially seriously polluting logs with confusing exceptions. While at it, return a sensible value from terminate/2. --- src/rabbit_error_logger.erl | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl index d8472842430b..3724b95e9d8f 100644 --- a/src/rabbit_error_logger.erl +++ b/src/rabbit_error_logger.erl @@ -69,7 +69,7 @@ init([DefaultVHost]) -> name = ?LOG_EXCH_NAME}}. terminate(_Arg, _State) -> - terminated_ok. + ok. code_change(_OldVsn, State, _Extra) -> {ok, State}. @@ -105,10 +105,11 @@ publish1(RoutingKey, Format, Data, LogExch) -> Args = [truncate:term(A, ?LOG_TRUNC) || A <- Data], Headers = [{<<"node">>, longstr, list_to_binary(atom_to_list(node()))}], - {ok, _DeliveredQPids} = - rabbit_basic:publish(LogExch, RoutingKey, - #'P_basic'{content_type = <<"text/plain">>, - timestamp = Timestamp, - headers = Headers}, - list_to_binary(io_lib:format(Format, Args))), - ok. 
+ case rabbit_basic:publish(LogExch, RoutingKey, + #'P_basic'{content_type = <<"text/plain">>, + timestamp = Timestamp, + headers = Headers}, + list_to_binary(io_lib:format(Format, Args))) of + {ok, _QPids} -> ok; + {error, _Err} -> ok + end. From 5f1ffc6b2834aca32674121ed723e6b7e632a54e Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Tue, 21 Jun 2016 08:17:49 +0100 Subject: [PATCH 157/174] Priority queues: retrieve max priority from state in invoke Neither amqqueue_process, master or slave modules are aware of the priority data expected by this function, it must be stored and retrieved internally. --- src/rabbit_priority_queue.erl | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/rabbit_priority_queue.erl b/src/rabbit_priority_queue.erl index ae8a38daf093..0d75753be65f 100644 --- a/src/rabbit_priority_queue.erl +++ b/src/rabbit_priority_queue.erl @@ -43,7 +43,7 @@ info/2, invoke/3, is_duplicate/2, set_queue_mode/2, zip_msgs_and_acks/4]). --record(state, {bq, bqss}). +-record(state, {bq, bqss, max_priority}). -record(passthrough, {bq, bqs}). %% See 'note on suffixes' below @@ -157,7 +157,8 @@ init(Q, Recover, AsyncCallback) -> [{P, Init(P, Term)} || {P, Term} <- PsTerms] end, #state{bq = BQ, - bqss = BQSs} + bqss = BQSs, + max_priority = hd(Ps)} end. %% [0] collapse_recovery has the effect of making a list of recovery %% terms in priority order, even for non priority queues. It's easier @@ -417,7 +418,7 @@ info(Item, #state{bq = BQ, bqss = BQSs}) -> info(Item, #passthrough{bq = BQ, bqs = BQS}) -> BQ:info(Item, BQS). -invoke(Mod, {P, Fun}, State = #state{bq = BQ}) -> +invoke(Mod, Fun, State = #state{bq = BQ, max_priority = P}) -> pick1(fun (_P, BQSN) -> BQ:invoke(Mod, Fun, BQSN) end, P, State); invoke(Mod, Fun, State = #passthrough{bq = BQ, bqs = BQS}) -> ?passthrough1(invoke(Mod, Fun, BQS)). 
From 92b3ef1212d4faba84939a90c6fe9883860d7444 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Tue, 21 Jun 2016 23:30:27 +0300 Subject: [PATCH 158/174] Include rabbitmq_top into server distribution Fixes #852. [##121992375] --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 1cdc1f797c7b..b11de0fcfcd3 100644 --- a/Makefile +++ b/Makefile @@ -54,6 +54,7 @@ DISTRIBUTED_DEPS := rabbitmq_amqp1_0 \ rabbitmq_shovel \ rabbitmq_shovel_management \ rabbitmq_stomp \ + rabbitmq_top \ rabbitmq_tracing \ rabbitmq_trust_store \ rabbitmq_web_dispatch \ From 716a43bbd34786b5052b2ca4f2b46e6516958aa0 Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Tue, 21 Jun 2016 08:20:37 +0100 Subject: [PATCH 159/174] Erase index on crash recovery for any backing queue It is up to the individual implementation to decide what to erase, i.e. priority queues have one index per priority --- src/rabbit_mirror_queue_slave.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 9edb99c4d707..c04c82f45ea7 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -120,7 +120,7 @@ handle_go(Q = #amqqueue{name = QName}) -> Self, {rabbit_amqqueue, set_ram_duration_target, [Self]}), {ok, BQ} = application:get_env(backing_queue_module), Q1 = Q #amqqueue { pid = QPid }, - ok = rabbit_queue_index:erase(QName), %% For crash recovery + _ = BQ:delete_crashed(Q), %% For crash recovery BQS = bq_init(BQ, Q1, new), State = #state { q = Q1, gm = GM, From b9d7e4d7e4699e927e978ea3bb7393da11491655 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Thu, 23 Jun 2016 02:11:30 +0300 Subject: [PATCH 160/174] terminate/2 now returns ok --- src/rabbit_error_logger.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl index 3724b95e9d8f..20af0c21a140 100644 --- a/src/rabbit_error_logger.erl 
+++ b/src/rabbit_error_logger.erl @@ -54,7 +54,7 @@ start() -> stop() -> case error_logger:delete_report_handler(rabbit_error_logger) of - terminated_ok -> ok; + ok -> ok; {error, module_not_found} -> ok end. From f5a4d26d638be14a139e72335c0bb65a6ff363c5 Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Thu, 23 Jun 2016 15:41:28 +0100 Subject: [PATCH 161/174] Tests for 802 issues and related race conditions --- test/priority_queue_SUITE.erl | 94 ++++++++++++++++++++++++++++++++--- 1 file changed, 88 insertions(+), 6 deletions(-) diff --git a/test/priority_queue_SUITE.erl b/test/priority_queue_SUITE.erl index 56b44d423eb5..db5db781556a 100644 --- a/test/priority_queue_SUITE.erl +++ b/test/priority_queue_SUITE.erl @@ -44,7 +44,8 @@ groups() -> requeue, resume, simple_order, - straight_through + straight_through, + invoke ]}, {non_parallel_tests, [], [ recovery %% Restart RabbitMQ. @@ -52,7 +53,9 @@ groups() -> ]}, {cluster_size_3, [], [ {parallel_tests, [parallel], [ - mirror_queue_auto_ack + mirror_queue_auto_ack, + mirror_fast_reset_policy, + mirror_reset_policy ]} ]} ]. @@ -206,6 +209,29 @@ straight_through(Config) -> rabbit_ct_client_helpers:close_channel(Ch), passed. +invoke(Config) -> + %% Synthetic test to check the invoke callback, as the bug tested here + %% is only triggered with a race condition. + %% When mirroring is stopped, the backing queue of rabbit_amqqueue_process + %% changes from rabbit_mirror_queue_master to rabbit_priority_queue, + %% which shouldn't receive any invoke call. However, there might + %% be pending messages so the priority queue receives the + %% `run_backing_queue` cast message sent to the old master. 
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, A), + Q = <<"invoke-queue">>, + declare(Ch, Q, 3), + Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)), + rabbit_ct_broker_helpers:rpc( + Config, A, gen_server, cast, + [Pid, + {run_backing_queue, ?MODULE, fun(_, _) -> ok end}]), + Pid2 = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)), + Pid = Pid2, + delete(Ch, Q), + rabbit_ct_client_helpers:close_channel(Ch), + passed. + dropwhile_fetchwhile(Config) -> Ch = rabbit_ct_client_helpers:open_channel(Config, 0), Q = <<"dropwhile_fetchwhile-queue">>, @@ -460,6 +486,47 @@ mirror_queue_sync_order(Config) -> delete(Ch2, Q), passed. + +mirror_reset_policy(Config) -> + %% Gives time to the master to go through all stages. + %% Might eventually trigger some race conditions from #802, + %% although for that I would expect a longer run and higher + %% number of messages in the system. + mirror_reset_policy(Config, 5000). + +mirror_fast_reset_policy(Config) -> + %% This test seems to trigger the bug tested in invoke/1, but it + %% cannot guarantee it will always happen. Thus, both tests + %% should stay in the test suite. + mirror_reset_policy(Config, 5). 
+ + +mirror_reset_policy(Config, Wait) -> + A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, A), + Q = <<"mirror_reset_policy-queue">>, + declare(Ch, Q, 5), + Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)), + publish_many(Ch, Q, 20000), + [begin + rabbit_ct_broker_helpers:set_ha_policy( + Config, A, <<"^mirror_reset_policy-queue$">>, <<"all">>, + [{<<"ha-sync-mode">>, <<"automatic">>}]), + timer:sleep(Wait), + rabbit_ct_broker_helpers:clear_policy( + Config, A, <<"^mirror_reset_policy-queue$">>), + timer:sleep(Wait) + end || _ <- lists:seq(1, 10)], + timer:sleep(1000), + ok = rabbit_ct_broker_helpers:set_ha_policy( + Config, A, <<"^mirror_reset_policy-queue$">>, <<"all">>, + [{<<"ha-sync-mode">>, <<"automatic">>}]), + wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q), 2), + %% Verify master has not crashed + Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)), + delete(Ch, Q), + passed. + %%---------------------------------------------------------------------------- open(Config) -> @@ -564,18 +631,26 @@ priority2bin(Int) -> list_to_binary(integer_to_list(Int)). %%---------------------------------------------------------------------------- wait_for_sync(Config, Nodename, Q) -> - case synced(Config, Nodename, Q) of + wait_for_sync(Config, Nodename, Q, 1). + +wait_for_sync(Config, Nodename, Q, Nodes) -> + wait_for_sync(Config, Nodename, Q, Nodes, 600). + +wait_for_sync(_, _, _, _, 0) -> + throw(sync_timeout); +wait_for_sync(Config, Nodename, Q, Nodes, N) -> + case synced(Config, Nodename, Q, Nodes) of true -> ok; false -> timer:sleep(100), - wait_for_sync(Config, Nodename, Q) + wait_for_sync(Config, Nodename, Q, Nodes, N-1) end. 
-synced(Config, Nodename, Q) -> +synced(Config, Nodename, Q, Nodes) -> Info = rabbit_ct_broker_helpers:rpc(Config, Nodename, rabbit_amqqueue, info_all, [<<"/">>, [name, synchronised_slave_pids]]), [SSPids] = [Pids || [{name, Q1}, {synchronised_slave_pids, Pids}] <- Info, Q =:= Q1], - length(SSPids) =:= 1. + length(SSPids) =:= Nodes. synced_msgs(Config, Nodename, Q, Expected) -> Info = rabbit_ct_broker_helpers:rpc(Config, Nodename, @@ -593,4 +668,11 @@ slave_pids(Config, Nodename, Q) -> Q =:= Q1], SPids. +queue_pid(Config, Nodename, Q) -> + Info = rabbit_ct_broker_helpers:rpc( + Config, Nodename, + rabbit_amqqueue, info_all, [<<"/">>, [name, pid]]), + [Pid] = [P || [{name, Q1}, {pid, P}] <- Info, Q =:= Q1], + Pid. + %%---------------------------------------------------------------------------- From 8e8787966d087c8400eddfd9f471ef7d60094342 Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Fri, 24 Jun 2016 12:30:32 +0100 Subject: [PATCH 162/174] Avoid index reset when publisher confirms are pending --- src/rabbit_variable_queue.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 45dde112a56c..7bf7f2ec54b7 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -556,7 +556,7 @@ delete_crashed(#amqqueue{name = QName}) -> ok = rabbit_queue_index:erase(QName). purge(State = #vqstate { len = Len }) -> - case is_pending_ack_empty(State) of + case is_pending_ack_empty(State) and is_unconfirmed_empty(State) of true -> {Len, purge_and_index_reset(State)}; false -> @@ -1648,6 +1648,9 @@ reset_qi_state(State = #vqstate{index_state = IndexState}) -> is_pending_ack_empty(State) -> count_pending_acks(State) =:= 0. +is_unconfirmed_empty(#vqstate { unconfirmed = UC }) -> + gb_sets:is_empty(UC). 
+ count_pending_acks(#vqstate { ram_pending_ack = RPA, disk_pending_ack = DPA, qi_pending_ack = QPA }) -> From ab03861f77395b99249274d1a023dcd414036e8a Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Mon, 27 Jun 2016 14:29:39 +0300 Subject: [PATCH 163/174] Use a separate connection per test --- test/priority_queue_SUITE.erl | 147 +++++++++++------------- test/priority_queue_recovery_SUITE.erl | 153 +++++++++++++++++++++++++ 2 files changed, 223 insertions(+), 77 deletions(-) create mode 100644 test/priority_queue_recovery_SUITE.erl diff --git a/test/priority_queue_SUITE.erl b/test/priority_queue_SUITE.erl index db5db781556a..cac228813be5 100644 --- a/test/priority_queue_SUITE.erl +++ b/test/priority_queue_SUITE.erl @@ -29,35 +29,28 @@ all() -> groups() -> [ - {cluster_size_2, [], [ - {parallel_tests, [parallel], [ - ackfold, - drop, - dropwhile_fetchwhile, - info_head_message_timestamp, - matching, - mirror_queue_sync, - mirror_queue_sync_priority_above_max, - mirror_queue_sync_priority_above_max_pending_ack, - mirror_queue_sync_order, - purge, - requeue, - resume, - simple_order, - straight_through, - invoke - ]}, - {non_parallel_tests, [], [ - recovery %% Restart RabbitMQ. - ]} - ]}, - {cluster_size_3, [], [ - {parallel_tests, [parallel], [ - mirror_queue_auto_ack, - mirror_fast_reset_policy, - mirror_reset_policy - ]} - ]} + {cluster_size_2, [], [ + ackfold, + drop, + dropwhile_fetchwhile, + info_head_message_timestamp, + matching, + mirror_queue_sync, + mirror_queue_sync_priority_above_max, + mirror_queue_sync_priority_above_max_pending_ack, + %mirror_queue_sync_order, + purge, + requeue, + resume, + simple_order, + straight_through, + invoke + ]}, + {cluster_size_3, [], [ + %mirror_queue_auto_ack, + mirror_fast_reset_policy, + mirror_reset_policy + ]} ]. %% ------------------------------------------------------------------- @@ -72,35 +65,35 @@ end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). 
init_per_group(cluster_size_2, Config) -> + Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, 2} + {rmq_nodes_count, 2}, + {rmq_nodename_suffix, Suffix} ]), rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps()); init_per_group(cluster_size_3, Config) -> + Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, 3} + {rmq_nodes_count, 3}, + {rmq_nodename_suffix, Suffix} ]), rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()); -init_per_group(_, Config) -> - Config. + rabbit_ct_client_helpers:setup_steps()). -end_per_group(ClusterSizeGroup, Config) -when ClusterSizeGroup =:= cluster_size_2 -orelse ClusterSizeGroup =:= cluster_size_3 -> +end_per_group(_Group, Config) -> rabbit_ct_helpers:run_steps(Config, rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()); -end_per_group(_, Config) -> - Config. + rabbit_ct_broker_helpers:teardown_steps()). init_per_testcase(Testcase, Config) -> + rabbit_ct_client_helpers:setup_steps(), rabbit_ct_helpers:testcase_started(Config, Testcase). end_per_testcase(Testcase, Config) -> + rabbit_ct_client_helpers:teardown_steps(), rabbit_ct_helpers:testcase_finished(Config, Testcase). %% ------------------------------------------------------------------- @@ -138,25 +131,8 @@ end_per_testcase(Testcase, Config) -> %% %% [0] publish enough to get credit flow from msg store -recovery(Config) -> - {Conn, Ch} = open(Config), - Q = <<"recovery-queue">>, - declare(Ch, Q, 3), - publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]), - amqp_connection:close(Conn), - - %% TODO This terminates the automatically open connection and breaks - %% coverage. 
- rabbit_ct_broker_helpers:restart_broker(Config, 0), - - {Conn2, Ch2} = open(Config), - get_all(Ch2, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]), - delete(Ch2, Q), - amqp_connection:close(Conn2), - passed. - simple_order(Config) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), Q = <<"simple_order-queue">>, declare(Ch, Q, 3), publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]), @@ -167,10 +143,11 @@ simple_order(Config) -> get_all(Ch, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]), delete(Ch, Q), rabbit_ct_client_helpers:close_channel(Ch), + rabbit_ct_client_helpers:close_connection(Conn), passed. matching(Config) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), Q = <<"matching-queue">>, declare(Ch, Q, 5), %% We round priority down, and 0 is the default @@ -178,10 +155,11 @@ matching(Config) -> get_all(Ch, Q, do_ack, [5, 10, undefined, 0, undefined]), delete(Ch, Q), rabbit_ct_client_helpers:close_channel(Ch), + rabbit_ct_client_helpers:close_connection(Conn), passed. resume(Config) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), Q = <<"resume-queue">>, declare(Ch, Q, 5), amqp_channel:call(Ch, #'confirm.select'{}), @@ -190,10 +168,11 @@ resume(Config) -> amqp_channel:call(Ch, #'queue.purge'{queue = Q}), %% Assert it exists delete(Ch, Q), rabbit_ct_client_helpers:close_channel(Ch), + rabbit_ct_client_helpers:close_connection(Conn), passed. 
straight_through(Config) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), Q = <<"straight_through-queue">>, declare(Ch, Q, 3), [begin @@ -207,6 +186,7 @@ straight_through(Config) -> get_empty(Ch, Q), delete(Ch, Q), rabbit_ct_client_helpers:close_channel(Ch), + rabbit_ct_client_helpers:close_connection(Conn), passed. invoke(Config) -> @@ -218,7 +198,7 @@ invoke(Config) -> %% be pending messages so the priority queue receives the %% `run_backing_queue` cast message sent to the old master. A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, A), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), Q = <<"invoke-queue">>, declare(Ch, Q, 3), Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)), @@ -230,10 +210,11 @@ invoke(Config) -> Pid = Pid2, delete(Ch, Q), rabbit_ct_client_helpers:close_channel(Ch), + rabbit_ct_client_helpers:close_connection(Conn), passed. dropwhile_fetchwhile(Config) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), Q = <<"dropwhile_fetchwhile-queue">>, [begin declare(Ch, Q, Args ++ arguments(3)), @@ -247,10 +228,11 @@ dropwhile_fetchwhile(Config) -> {<<"x-dead-letter-exchange">>, longstr, <<"amq.fanout">>}] ]], rabbit_ct_client_helpers:close_channel(Ch), + rabbit_ct_client_helpers:close_connection(Conn), passed. ackfold(Config) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), Q = <<"ackfolq-queue1">>, Q2 = <<"ackfold-queue2">>, declare(Ch, Q, @@ -268,10 +250,11 @@ ackfold(Config) -> delete(Ch, Q), delete(Ch, Q2), rabbit_ct_client_helpers:close_channel(Ch), + rabbit_ct_client_helpers:close_connection(Conn), passed. 
requeue(Config) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), Q = <<"requeue-queue">>, declare(Ch, Q, 3), publish(Ch, Q, [1, 2, 3]), @@ -282,10 +265,11 @@ requeue(Config) -> get_all(Ch, Q, do_ack, [3, 2, 1]), delete(Ch, Q), rabbit_ct_client_helpers:close_channel(Ch), + rabbit_ct_client_helpers:close_connection(Conn), passed. drop(Config) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), Q = <<"drop-queue">>, declare(Ch, Q, [{<<"x-max-length">>, long, 4} | arguments(3)]), publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]), @@ -294,10 +278,11 @@ drop(Config) -> get_all(Ch, Q, do_ack, [2, 1, 1, 1]), delete(Ch, Q), rabbit_ct_client_helpers:close_channel(Ch), + rabbit_ct_client_helpers:close_connection(Conn), passed. purge(Config) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), Q = <<"purge-queue">>, declare(Ch, Q, 3), publish(Ch, Q, [1, 2, 3]), @@ -305,6 +290,7 @@ purge(Config) -> get_empty(Ch, Q), delete(Ch, Q), rabbit_ct_client_helpers:close_channel(Ch), + rabbit_ct_client_helpers:close_connection(Conn), passed. info_head_message_timestamp(Config) -> @@ -375,7 +361,7 @@ ram_duration(_Config) -> passed. mirror_queue_sync(Config) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), Q = <<"mirror_queue_sync-queue">>, declare(Ch, Q, 3), publish(Ch, Q, [1, 2, 3]), @@ -389,13 +375,14 @@ mirror_queue_sync(Config) -> rabbit_ct_broker_helpers:control_action(sync_queue, Nodename0, [binary_to_list(Q)], [{"-p", "/"}]), wait_for_sync(Config, Nodename0, rabbit_misc:r(<<"/">>, queue, Q)), + rabbit_ct_client_helpers:close_connection(Conn), passed. 
mirror_queue_sync_priority_above_max(Config) -> A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), %% Tests synchronisation of slaves when priority is higher than max priority. %% This causes an infinity loop (and test timeout) before rabbitmq-server-795 - Ch = rabbit_ct_client_helpers:open_channel(Config, A), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), Q = <<"mirror_queue_sync_priority_above_max-queue">>, declare(Ch, Q, 3), publish(Ch, Q, [5, 5, 5]), @@ -405,6 +392,7 @@ mirror_queue_sync_priority_above_max(Config) -> [binary_to_list(Q)], [{"-p", "/"}]), wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q)), delete(Ch, Q), + rabbit_ct_client_helpers:close_connection(Conn), passed. mirror_queue_sync_priority_above_max_pending_ack(Config) -> @@ -412,7 +400,7 @@ mirror_queue_sync_priority_above_max_pending_ack(Config) -> %% Tests synchronisation of slaves when priority is higher than max priority %% and there are pending acks. %% This causes an infinity loop (and test timeout) before rabbitmq-server-795 - Ch = rabbit_ct_client_helpers:open_channel(Config, A), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), Q = <<"mirror_queue_sync_priority_above_max_pending_ack-queue">>, declare(Ch, Q, 3), publish(Ch, Q, [5, 5, 5]), @@ -427,6 +415,7 @@ mirror_queue_sync_priority_above_max_pending_ack(Config) -> synced_msgs(Config, A, rabbit_misc:r(<<"/">>, queue, Q), 3), synced_msgs(Config, B, rabbit_misc:r(<<"/">>, queue, Q), 3), delete(Ch, Q), + rabbit_ct_client_helpers:close_connection(Conn), passed. mirror_queue_auto_ack(Config) -> @@ -436,7 +425,7 @@ mirror_queue_auto_ack(Config) -> %% the slaves will crash with the depth notification as they will not %% match the master delta. 
%% Bug rabbitmq-server 687 - Ch = rabbit_ct_client_helpers:open_channel(Config, A), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), Q = <<"mirror_queue_auto_ack-queue">>, declare(Ch, Q, 3), publish(Ch, Q, [1, 2, 3]), @@ -447,6 +436,7 @@ mirror_queue_auto_ack(Config) -> %% Retrieve slaves SPids = slave_pids(Config, A, rabbit_misc:r(<<"/">>, queue, Q)), [{SNode1, _SPid1}, {SNode2, SPid2}] = nodes_and_pids(SPids), + rabbit_ct_client_helpers:close_channel(Ch), %% Restart one of the slaves so `request_depth` is triggered rabbit_ct_broker_helpers:restart_node(Config, SNode1), @@ -457,13 +447,15 @@ mirror_queue_auto_ack(Config) -> SPid2 = proplists:get_value(SNode2, Slaves), delete(Ch, Q), + rabbit_ct_client_helpers:close_connection(Conn), + passed. mirror_queue_sync_order(Config) -> A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, A), - Ch2 = rabbit_ct_client_helpers:open_channel(Config, B), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), + {Conn2, Ch2} = rabbit_ct_client_helpers:open_connection_and_channel(Config, B), Q = <<"mirror_queue_sync_order-queue">>, declare(Ch, Q, 3), publish_payload(Ch, Q, [{1, <<"msg1">>}, {2, <<"msg2">>}, @@ -485,6 +477,8 @@ mirror_queue_sync_order(Config) -> <<"msg4">>, <<"msg1">>]), delete(Ch2, Q), + rabbit_ct_client_helpers:close_connection(Conn), + rabbit_ct_client_helpers:close_connection(Conn2), passed. 
mirror_reset_policy(Config) -> @@ -503,7 +497,7 @@ mirror_fast_reset_policy(Config) -> mirror_reset_policy(Config, Wait) -> A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, A), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), Q = <<"mirror_reset_policy-queue">>, declare(Ch, Q, 5), Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)), @@ -525,13 +519,12 @@ mirror_reset_policy(Config, Wait) -> %% Verify master has not crashed Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)), delete(Ch, Q), + + rabbit_ct_client_helpers:close_connection(Conn), passed. %%---------------------------------------------------------------------------- -open(Config) -> - rabbit_ct_client_helpers:open_connection_and_channel(Config, 0). - declare(Ch, Q, Args) when is_list(Args) -> amqp_channel:call(Ch, #'queue.declare'{queue = Q, durable = true, diff --git a/test/priority_queue_recovery_SUITE.erl b/test/priority_queue_recovery_SUITE.erl new file mode 100644 index 000000000000..9e2ffbd3fe69 --- /dev/null +++ b/test/priority_queue_recovery_SUITE.erl @@ -0,0 +1,153 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2011-2016 Pivotal Software, Inc. All rights reserved. +%% + +-module(priority_queue_recovery_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). 
+ +-compile(export_all). + +all() -> + [ + {group, non_parallel_tests} + ]. + +groups() -> + [ + {non_parallel_tests, [], [ + recovery %% Restart RabbitMQ. + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(_, Config) -> + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_count, 2} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_group(_, Config) -> + rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. +%% ------------------------------------------------------------------- + +recovery(Config) -> + {Conn, Ch} = open(Config), + Q = <<"recovery-queue">>, + declare(Ch, Q, 3), + publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]), + rabbit_ct_client_helpers:close_channel(Ch), + rabbit_ct_client_helpers:close_connection(Conn), + + rabbit_ct_broker_helpers:restart_broker(Config, 0), + + {Conn2, Ch2} = open(Config, 1), + get_all(Ch2, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]), + delete(Ch2, Q), + rabbit_ct_client_helpers:close_channel(Ch2), + rabbit_ct_client_helpers:close_connection(Conn2), + passed. + + +%%---------------------------------------------------------------------------- + +open(Config) -> + open(Config, 0). 
+ +open(Config, NodeIndex) -> + rabbit_ct_client_helpers:open_connection_and_channel(Config, NodeIndex). + +declare(Ch, Q, Args) when is_list(Args) -> + amqp_channel:call(Ch, #'queue.declare'{queue = Q, + durable = true, + arguments = Args}); +declare(Ch, Q, Max) -> + declare(Ch, Q, arguments(Max)). + +delete(Ch, Q) -> + amqp_channel:call(Ch, #'queue.delete'{queue = Q}). + +publish(Ch, Q, Ps) -> + amqp_channel:call(Ch, #'confirm.select'{}), + [publish1(Ch, Q, P) || P <- Ps], + amqp_channel:wait_for_confirms(Ch). + +publish1(Ch, Q, P) -> + amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q}, + #amqp_msg{props = props(P), + payload = priority2bin(P)}). + +publish1(Ch, Q, P, Pd) -> + amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q}, + #amqp_msg{props = props(P), + payload = Pd}). + +get_all(Ch, Q, Ack, Ps) -> + DTags = get_partial(Ch, Q, Ack, Ps), + get_empty(Ch, Q), + DTags. + +get_partial(Ch, Q, Ack, Ps) -> + [get_ok(Ch, Q, Ack, priority2bin(P)) || P <- Ps]. + +get_empty(Ch, Q) -> + #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = Q}). + +get_ok(Ch, Q, Ack, PBin) -> + {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} = + amqp_channel:call(Ch, #'basic.get'{queue = Q, + no_ack = Ack =:= no_ack}), + PBin = PBin2, + maybe_ack(Ch, Ack, DTag). + +maybe_ack(Ch, do_ack, DTag) -> + amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}), + DTag; +maybe_ack(_Ch, _, DTag) -> + DTag. + +arguments(none) -> []; +arguments(Max) -> [{<<"x-max-priority">>, byte, Max}]. + +priority2bin(undefined) -> <<"undefined">>; +priority2bin(Int) -> list_to_binary(integer_to_list(Int)). + +props(undefined) -> #'P_basic'{delivery_mode = 2}; +props(P) -> #'P_basic'{priority = P, + delivery_mode = 2}. 
From 9f30621e86abcb7bccb1534b9bfede44b50c589e Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Mon, 27 Jun 2016 14:30:21 +0100 Subject: [PATCH 164/174] Restore invoke clause --- src/rabbit_priority_queue.erl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/rabbit_priority_queue.erl b/src/rabbit_priority_queue.erl index 0d75753be65f..b7a3afd12956 100644 --- a/src/rabbit_priority_queue.erl +++ b/src/rabbit_priority_queue.erl @@ -418,6 +418,8 @@ info(Item, #state{bq = BQ, bqss = BQSs}) -> info(Item, #passthrough{bq = BQ, bqs = BQS}) -> BQ:info(Item, BQS). +invoke(Mod, {P, Fun}, State = #state{bq = BQ}) -> + pick1(fun (_P, BQSN) -> BQ:invoke(Mod, Fun, BQSN) end, P, State); invoke(Mod, Fun, State = #state{bq = BQ, max_priority = P}) -> pick1(fun (_P, BQSN) -> BQ:invoke(Mod, Fun, BQSN) end, P, State); invoke(Mod, Fun, State = #passthrough{bq = BQ, bqs = BQS}) -> From 5b330057b2eaf5781c4a18398c49bf79769c1c2a Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Mon, 27 Jun 2016 14:36:26 +0300 Subject: [PATCH 165/174] Re-enable more tests --- test/priority_queue_SUITE.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/priority_queue_SUITE.erl b/test/priority_queue_SUITE.erl index cac228813be5..0c2d332e7070 100644 --- a/test/priority_queue_SUITE.erl +++ b/test/priority_queue_SUITE.erl @@ -38,7 +38,7 @@ groups() -> mirror_queue_sync, mirror_queue_sync_priority_above_max, mirror_queue_sync_priority_above_max_pending_ack, - %mirror_queue_sync_order, + mirror_queue_sync_order, purge, requeue, resume, @@ -47,7 +47,7 @@ groups() -> invoke ]}, {cluster_size_3, [], [ - %mirror_queue_auto_ack, + mirror_queue_auto_ack, mirror_fast_reset_policy, mirror_reset_policy ]} From b6aaf53513ae5b8d909a714f53389fc5bd3445c8 Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Mon, 27 Jun 2016 16:17:10 +0100 Subject: [PATCH 166/174] Fix tests cleanup --- test/priority_queue_SUITE.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/test/priority_queue_SUITE.erl b/test/priority_queue_SUITE.erl index 0c2d332e7070..46fafd89f728 100644 --- a/test/priority_queue_SUITE.erl +++ b/test/priority_queue_SUITE.erl @@ -436,7 +436,6 @@ mirror_queue_auto_ack(Config) -> %% Retrieve slaves SPids = slave_pids(Config, A, rabbit_misc:r(<<"/">>, queue, Q)), [{SNode1, _SPid1}, {SNode2, SPid2}] = nodes_and_pids(SPids), - rabbit_ct_client_helpers:close_channel(Ch), %% Restart one of the slaves so `request_depth` is triggered rabbit_ct_broker_helpers:restart_node(Config, SNode1), @@ -447,8 +446,8 @@ mirror_queue_auto_ack(Config) -> SPid2 = proplists:get_value(SNode2, Slaves), delete(Ch, Q), + rabbit_ct_client_helpers:close_channel(Ch), rabbit_ct_client_helpers:close_connection(Conn), - passed. mirror_queue_sync_order(Config) -> @@ -477,6 +476,7 @@ mirror_queue_sync_order(Config) -> <<"msg4">>, <<"msg1">>]), delete(Ch2, Q), + rabbit_ct_broker_helpers:start_node(Config, A), rabbit_ct_client_helpers:close_connection(Conn), rabbit_ct_client_helpers:close_connection(Conn2), passed. From 6fed347626931ef12a04aba0ac0e5d47d59bc24f Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Wed, 22 Jun 2016 15:02:31 +0100 Subject: [PATCH 167/174] Ensure old incarnations of slaves are stopped before new ones start * Solves race condition between master asking to stop and the restart of the queues. --- src/rabbit_mirror_queue_master.erl | 34 ++++++++++++++++++++---------- src/rabbit_mirror_queue_slave.erl | 26 +++++++++++++++++++++-- src/rabbit_upgrade_functions.erl | 19 +++++++++++++++++ 3 files changed, 66 insertions(+), 13 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 9674a4ef2c40..b5c5ffd418fc 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -214,16 +214,24 @@ stop_all_slaves(Reason, #state{name = QName, gm = GM, wait_timeout = WT}) -> %% monitor them but they would not have received the GM %% message. 
So only wait for slaves which are still %% not-partitioned. - [receive - {'DOWN', MRef, process, _Pid, _Info} -> - ok - after WT -> - rabbit_mirror_queue_misc:log_warning( - QName, "Missing 'DOWN' message from ~p in node ~p~n", - [Pid, node(Pid)]), - ok - end - || {Pid, MRef} <- PidsMRefs, rabbit_mnesia:on_running_node(Pid)], + PendingSlavePids = + lists:foldl( + fun({Pid, MRef}, Acc) -> + case rabbit_mnesia:on_running_node(Pid) of + true -> + receive + {'DOWN', MRef, process, _Pid, _Info} -> + Acc + after WT -> + rabbit_mirror_queue_misc:log_warning( + QName, "Missing 'DOWN' message from ~p in" + " node ~p~n", [Pid, node(Pid)]), + [Pid | Acc] + end; + false -> + Acc + end + end, [], PidsMRefs), %% Normally when we remove a slave another slave or master will %% notice and update Mnesia. But we just removed them all, and %% have stopped listening ourselves. So manually clean up. @@ -231,7 +239,11 @@ stop_all_slaves(Reason, #state{name = QName, gm = GM, wait_timeout = WT}) -> fun () -> [Q] = mnesia:read({rabbit_queue, QName}), rabbit_mirror_queue_misc:store_updated_slaves( - Q #amqqueue { gm_pids = [], slave_pids = [] }) + Q #amqqueue { gm_pids = [], slave_pids = [], + %% Restarted slaves on running nodes can + %% ensure old incarnations are stopped using + %% the pending slave pids. + slave_pids_pending_shutdown = PendingSlavePids}) end), ok = gm:forget_group(QName). 
diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index c04c82f45ea7..d8aa3a7aadff 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -163,9 +163,11 @@ handle_go(Q = #amqqueue{name = QName}) -> init_it(Self, GM, Node, QName) -> case mnesia:read({rabbit_queue, QName}) of - [Q = #amqqueue { pid = QPid, slave_pids = SPids, gm_pids = GMPids }] -> + [Q = #amqqueue { pid = QPid, slave_pids = SPids, gm_pids = GMPids, + slave_pids_pending_shutdown = PSPids}] -> case [Pid || Pid <- [QPid | SPids], node(Pid) =:= Node] of - [] -> add_slave(Q, Self, GM), + [] -> stop_pending_slaves(QName, PSPids), + add_slave(Q, Self, GM), {new, QPid, GMPids}; [QPid] -> case rabbit_mnesia:is_process_alive(QPid) of true -> duplicate_live_master; @@ -186,6 +188,26 @@ init_it(Self, GM, Node, QName) -> master_in_recovery end. +%% Pending slaves have been asked to stop by the master, but despite the node +%% being up these did not answer on the expected timeout. Stop local slaves now. +stop_pending_slaves(QName, Pids) -> + [begin + rabbit_mirror_queue_misc:log_warning( + QName, "Detected stale HA slave, stopping it: ~p~n", [Pid]), + case erlang:process_info(Pid, dictionary) of + undefined -> ok; + {dictionary, Dict} -> + case proplists:get_value('$ancestors', Dict) of + [Sup, rabbit_amqqueue_sup_sup | _] -> + exit(Sup, kill), + exit(Pid, kill); + _ -> + ok + end + end + end || Pid <- Pids, node(Pid) =:= node(), + true =:= erlang:is_process_alive(Pid)]. + %% Add to the end, so they are in descending order of age, see %% rabbit_mirror_queue_misc:promote_slave/1 add_slave(Q = #amqqueue { slave_pids = SPids, gm_pids = GMPids }, New, GM) -> diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 0f55b9e4a961..c6e739a4876c 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -53,6 +53,7 @@ -rabbit_upgrade({queue_state, mnesia, [down_slave_nodes]}). 
-rabbit_upgrade({recoverable_slaves, mnesia, [queue_state]}). -rabbit_upgrade({policy_version, mnesia, [recoverable_slaves]}). +-rabbit_upgrade({slave_pids_pending_shutdown, mnesia, [policy_version]}). -rabbit_upgrade({user_password_hashing, mnesia, [hash_passwords]}). %% ------------------------------------------------------------------- @@ -466,6 +467,24 @@ policy_version(Table) -> sync_slave_pids, recoverable_slaves, policy, gm_pids, decorators, state, policy_version]). +slave_pids_pending_shutdown() -> + ok = slave_pids_pending_shutdown(rabbit_queue), + ok = slave_pids_pending_shutdown(rabbit_durable_queue). + +slave_pids_pending_shutdown(Table) -> + transform( + Table, + fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments, + Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators, + State, PolicyVersion}) -> + {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments, + Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators, + State, PolicyVersion, []} + end, + [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids, + sync_slave_pids, recoverable_slaves, policy, gm_pids, decorators, state, + policy_version, slave_pids_pending_shutdown]). + %% Prior to 3.6.0, passwords were hashed using MD5, this populates %% existing records with said default. 
Users created with 3.6.0+ will %% have internal_user.hashing_algorithm populated by the internal From 7b061c717174fc176e322e77e1b011c04977e6a0 Mon Sep 17 00:00:00 2001 From: Diana Corbacho Date: Tue, 28 Jun 2016 10:00:08 +0100 Subject: [PATCH 168/174] Test for stop pending slaves --- test/priority_queue_SUITE.erl | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/test/priority_queue_SUITE.erl b/test/priority_queue_SUITE.erl index 46fafd89f728..546b1f4f283d 100644 --- a/test/priority_queue_SUITE.erl +++ b/test/priority_queue_SUITE.erl @@ -49,7 +49,8 @@ groups() -> {cluster_size_3, [], [ mirror_queue_auto_ack, mirror_fast_reset_policy, - mirror_reset_policy + mirror_reset_policy, + mirror_stop_pending_slaves ]} ]. @@ -523,6 +524,34 @@ mirror_reset_policy(Config, Wait) -> rabbit_ct_client_helpers:close_connection(Conn), passed. +mirror_stop_pending_slaves(Config) -> + A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename), + C = rabbit_ct_broker_helpers:get_node_config(Config, 2, nodename), + ok = rabbit_ct_broker_helpers:rpc( + Config, A, application, set_env, [rabbit, slave_wait_timeout, 0]), + ok = rabbit_ct_broker_helpers:rpc( + Config, B, application, set_env, [rabbit, slave_wait_timeout, 0]), + ok = rabbit_ct_broker_helpers:rpc( + Config, C, application, set_env, [rabbit, slave_wait_timeout, 0]), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), + Q = <<"mirror_stop_pending_slaves-queue">>, + declare(Ch, Q, 5), + publish_many(Ch, Q, 20000), + + [begin + rabbit_ct_broker_helpers:set_ha_policy( + Config, A, <<"^mirror_stop_pending_slaves-queue$">>, <<"all">>, + [{<<"ha-sync-mode">>, <<"automatic">>}]), + wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q), 2), + rabbit_ct_broker_helpers:clear_policy( + Config, A, <<"^mirror_stop_pending_slaves-queue$">>) + end || _ <- lists:seq(1, 15)], + + delete(Ch, 
Q), + rabbit_ct_client_helpers:close_connection(Conn), + passed. + %%---------------------------------------------------------------------------- declare(Ch, Q, Args) when is_list(Args) -> From e0051b72b1e6926ab8e023dec86d9b15833cc064 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Tue, 28 Jun 2016 16:13:38 +0300 Subject: [PATCH 169/174] Set rabbit.slave_wait_timeout back to 15s at the end of this test --- test/priority_queue_SUITE.erl | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/test/priority_queue_SUITE.erl b/test/priority_queue_SUITE.erl index 546b1f4f283d..39ed3af69ad3 100644 --- a/test/priority_queue_SUITE.erl +++ b/test/priority_queue_SUITE.erl @@ -528,12 +528,10 @@ mirror_stop_pending_slaves(Config) -> A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename), C = rabbit_ct_broker_helpers:get_node_config(Config, 2, nodename), - ok = rabbit_ct_broker_helpers:rpc( - Config, A, application, set_env, [rabbit, slave_wait_timeout, 0]), - ok = rabbit_ct_broker_helpers:rpc( - Config, B, application, set_env, [rabbit, slave_wait_timeout, 0]), - ok = rabbit_ct_broker_helpers:rpc( - Config, C, application, set_env, [rabbit, slave_wait_timeout, 0]), + + [ok = rabbit_ct_broker_helpers:rpc( + Config, Nodename, application, set_env, [rabbit, slave_wait_timeout, 0]) || Nodename <- [A, B, C]], + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), Q = <<"mirror_stop_pending_slaves-queue">>, declare(Ch, Q, 5), @@ -549,6 +547,10 @@ mirror_stop_pending_slaves(Config) -> end || _ <- lists:seq(1, 15)], delete(Ch, Q), + + [ok = rabbit_ct_broker_helpers:rpc( + Config, Nodename, application, set_env, [rabbit, slave_wait_timeout, 15000]) || Nodename <- [A, B, C]], + rabbit_ct_client_helpers:close_connection(Conn), passed. 
From d27a16388d28e753120f30b047ab64077abb9f88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Tue, 28 Jun 2016 15:55:20 +0200 Subject: [PATCH 170/174] Git: Ignore `xrefr` --- .gitignore | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.gitignore b/.gitignore index 720f17d9b640..f62802a7ec1d 100644 --- a/.gitignore +++ b/.gitignore @@ -3,16 +3,16 @@ .*.sw? *.beam *.coverdata -.erlang.mk/ -cover/ -debug/ -deps/ -doc/ -ebin/ -etc/ -logs/ -plugins/ -test/ct.cover.spec +/.erlang.mk/ +/cover/ +/deps/ +/doc/ +/ebin/ +/etc/ +/logs/ +/plugins/ +/test/ct.cover.spec +/xrefr PACKAGES/ From fa20bbcc482e3bd09678351191ee9a296113b3dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Tue, 28 Jun 2016 15:26:41 +0200 Subject: [PATCH 171/174] Use the new -spec format The old format is removed in Erlang 19.0, leading to build errors. Also, get rid of the `use_specs` macro and thus always define -spec() & friends. While here, unify the style of -type and -spec. References #860.
[#118562897] [#122335241] --- Makefile | 6 - include/gm_specs.hrl | 18 ++- packaging/standalone/Makefile | 2 +- src/background_gc.erl | 10 +- src/delegate.erl | 30 +++-- src/delegate_sup.erl | 8 +- src/dtree.erl | 38 +++---- src/file_handle_cache.erl | 87 +++++++-------- src/gatherer.erl | 18 ++- src/gm.erl | 37 ++----- src/lqueue.erl | 40 +++---- src/mnesia_sync.erl | 6 +- src/pg_local.erl | 20 ++-- src/rabbit.erl | 82 +++++++------- src/rabbit_access_control.erl | 43 ++++---- src/rabbit_alarm.erl | 36 +++--- src/rabbit_amqqueue_process.erl | 11 +- src/rabbit_amqqueue_sup.erl | 8 +- src/rabbit_amqqueue_sup_sup.erl | 11 +- src/rabbit_binding.erl | 103 +++++++++--------- src/rabbit_channel_sup.erl | 10 +- src/rabbit_channel_sup_sup.erl | 10 +- src/rabbit_cli.erl | 47 ++++---- src/rabbit_client_sup.erl | 17 +-- src/rabbit_connection_helper_sup.erl | 11 +- src/rabbit_connection_sup.erl | 9 +- src/rabbit_control_main.erl | 20 ++-- src/rabbit_dead_letter.erl | 4 - src/rabbit_direct.erl | 36 +++--- src/rabbit_disk_monitor.erl | 24 ++-- src/rabbit_epmd_monitor.erl | 6 +- src/rabbit_error_logger.erl | 8 +- src/rabbit_exchange.erl | 102 +++++++++-------- src/rabbit_exchange_type_headers.erl | 7 +- src/rabbit_exchange_type_invalid.erl | 6 +- src/rabbit_file.erl | 55 ++++------ src/rabbit_framing.erl | 6 +- src/rabbit_guid.erl | 18 ++- src/rabbit_limiter.erl | 74 ++++++------- src/rabbit_log.erl | 30 +++-- src/rabbit_memory_monitor.erl | 16 +-- src/rabbit_mirror_queue_coordinator.erl | 17 ++- src/rabbit_mirror_queue_master.erl | 33 +++--- src/rabbit_mirror_queue_misc.erl | 40 +++---- src/rabbit_mirror_queue_mode.erl | 19 +--- src/rabbit_mirror_queue_slave.erl | 5 +- src/rabbit_mirror_queue_sync.erl | 30 +++-- src/rabbit_mnesia.erl | 58 +++++----- src/rabbit_mnesia_rename.erl | 8 +- src/rabbit_msg_file.erl | 32 +++--- src/rabbit_msg_store.erl | 80 +++++++------- src/rabbit_msg_store_gc.erl | 20 ++-- src/rabbit_node_monitor.erl | 54 +++++---- src/rabbit_plugins.erl | 23 ++-- 
src/rabbit_plugins_main.erl | 8 +- src/rabbit_prelaunch.erl | 15 +-- src/rabbit_prequeue.erl | 10 +- src/rabbit_queue_consumers.erl | 4 - src/rabbit_queue_index.erl | 74 ++++++------- src/rabbit_recovery_terms.erl | 14 +-- src/rabbit_registry.erl | 20 ++-- src/rabbit_resource_monitor_misc.erl | 8 +- src/rabbit_restartable_sup.erl | 8 +- src/rabbit_router.erl | 16 +-- src/rabbit_runtime_parameters.erl | 60 +++++----- src/rabbit_ssl.erl | 20 ++-- src/rabbit_sup.erl | 28 ++--- src/rabbit_table.erl | 26 ++--- src/rabbit_trace.erl | 22 ++-- src/rabbit_upgrade.erl | 13 +-- src/rabbit_upgrade_functions.erl | 62 +++++------ src/rabbit_variable_queue.erl | 24 ++-- src/rabbit_version.erl | 45 ++++---- src/rabbit_vhost.erl | 32 +++--- src/rabbit_vm.erl | 38 +++---- src/supervised_lifecycle.erl | 8 +- src/tcp_listener.erl | 10 +- src/tcp_listener_sup.erl | 10 +- src/vm_memory_monitor.erl | 26 ++--- src/worker_pool.erl | 24 ++-- src/worker_pool_sup.erl | 12 +- src/worker_pool_worker.erl | 20 ++-- test/channel_operation_timeout_test_queue.erl | 24 ++-- 83 files changed, 949 insertions(+), 1281 deletions(-) diff --git a/Makefile b/Makefile index b11de0fcfcd3..ee4c1a0afdd5 100644 --- a/Makefile +++ b/Makefile @@ -91,12 +91,6 @@ ifdef CREDIT_FLOW_TRACING RMQ_ERLC_OPTS += -DCREDIT_FLOW_TRACING=true endif -ERTS_VER := $(shell erl -version 2>&1 | sed -E 's/.* version //') -USE_SPECS_MIN_ERTS_VER = 5.11 -ifeq ($(call compare_version,$(ERTS_VER),$(USE_SPECS_MIN_ERTS_VER),>=),true) -RMQ_ERLC_OPTS += -Duse_specs -endif - ifndef USE_PROPER_QC # PropEr needs to be installed for property checking # http://proper.softlab.ntua.gr/ diff --git a/include/gm_specs.hrl b/include/gm_specs.hrl index bc20b4415d11..d03f9938e565 100644 --- a/include/gm_specs.hrl +++ b/include/gm_specs.hrl @@ -14,15 +14,11 @@ %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. %% --ifdef(use_specs). +-type callback_result() :: 'ok' | {'stop', any()} | {'become', atom(), args()}. +-type args() :: any(). 
+-type members() :: [pid()]. --type(callback_result() :: 'ok' | {'stop', any()} | {'become', atom(), args()}). --type(args() :: any()). --type(members() :: [pid()]). - --spec(joined/2 :: (args(), members()) -> callback_result()). --spec(members_changed/3 :: (args(), members(),members()) -> callback_result()). --spec(handle_msg/3 :: (args(), pid(), any()) -> callback_result()). --spec(handle_terminate/2 :: (args(), term()) -> any()). - --endif. +-spec joined(args(), members()) -> callback_result(). +-spec members_changed(args(), members(),members()) -> callback_result(). +-spec handle_msg(args(), pid(), any()) -> callback_result(). +-spec handle_terminate(args(), term()) -> any(). diff --git a/packaging/standalone/Makefile b/packaging/standalone/Makefile index b86af40ca6cc..aa8660ce38c5 100644 --- a/packaging/standalone/Makefile +++ b/packaging/standalone/Makefile @@ -109,7 +109,7 @@ clean: generate_release: erlc \ -I $(TARGET_DIR)/include/ -o src -Wall \ - -v +debug_info -Duse_specs -Duse_proper_qc \ + -v +debug_info -Duse_proper_qc \ -pa $(TARGET_DIR)/ebin/ src/rabbit_release.erl ERL_LIBS="$(TARGET_DIR)/plugins:$$ERL_LIBS" \ erl \ diff --git a/src/background_gc.erl b/src/background_gc.erl index 8388207d52a1..2986f356f5e6 100644 --- a/src/background_gc.erl +++ b/src/background_gc.erl @@ -32,13 +32,9 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}). --spec(run/0 :: () -> 'ok'). --spec(gc/0 :: () -> 'ok'). - --endif. +-spec start_link() -> {'ok', pid()} | {'error', any()}. +-spec run() -> 'ok'. +-spec gc() -> 'ok'. %%---------------------------------------------------------------------------- diff --git a/src/delegate.erl b/src/delegate.erl index fc3bc2890f01..778137c1c72d 100644 --- a/src/delegate.erl +++ b/src/delegate.erl @@ -57,28 +57,24 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). 
- -export_type([monitor_ref/0]). --type(monitor_ref() :: reference() | {atom(), pid()}). --type(fun_or_mfa(A) :: fun ((pid()) -> A) | {atom(), atom(), [any()]}). +-type monitor_ref() :: reference() | {atom(), pid()}. +-type fun_or_mfa(A) :: fun ((pid()) -> A) | {atom(), atom(), [any()]}. --spec(start_link/1 :: - (non_neg_integer()) -> {'ok', pid()} | ignore | {'error', any()}). --spec(invoke/2 :: ( pid(), fun_or_mfa(A)) -> A; - ([pid()], fun_or_mfa(A)) -> {[{pid(), A}], - [{pid(), term()}]}). --spec(invoke_no_result/2 :: (pid() | [pid()], fun_or_mfa(any())) -> 'ok'). --spec(monitor/2 :: ('process', pid()) -> monitor_ref()). --spec(demonitor/1 :: (monitor_ref()) -> 'true'). +-spec start_link + (non_neg_integer()) -> {'ok', pid()} | ignore | {'error', any()}. +-spec invoke + ( pid(), fun_or_mfa(A)) -> A; + ([pid()], fun_or_mfa(A)) -> {[{pid(), A}], [{pid(), term()}]}. +-spec invoke_no_result(pid() | [pid()], fun_or_mfa(any())) -> 'ok'. +-spec monitor('process', pid()) -> monitor_ref(). +-spec demonitor(monitor_ref()) -> 'true'. --spec(call/2 :: +-spec call ( pid(), any()) -> any(); - ([pid()], any()) -> {[{pid(), any()}], [{pid(), term()}]}). --spec(cast/2 :: (pid() | [pid()], any()) -> 'ok'). - --endif. + ([pid()], any()) -> {[{pid(), any()}], [{pid(), term()}]}. +-spec cast(pid() | [pid()], any()) -> 'ok'. %%---------------------------------------------------------------------------- diff --git a/src/delegate_sup.erl b/src/delegate_sup.erl index 84ca9553ec14..ba0964f9dd6c 100644 --- a/src/delegate_sup.erl +++ b/src/delegate_sup.erl @@ -26,12 +26,8 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start_link/1 :: (integer()) -> rabbit_types:ok_pid_or_error()). --spec(count/1 :: ([node()]) -> integer()). - --endif. +-spec start_link(integer()) -> rabbit_types:ok_pid_or_error(). +-spec count([node()]) -> integer(). 
%%---------------------------------------------------------------------------- diff --git a/src/dtree.erl b/src/dtree.erl index 99133e75b6ff..a2232c06874f 100644 --- a/src/dtree.erl +++ b/src/dtree.erl @@ -37,29 +37,25 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - -export_type([?MODULE/0]). --opaque(?MODULE() :: {gb_trees:tree(), gb_trees:tree()}). - --type(pk() :: any()). --type(sk() :: any()). --type(val() :: any()). --type(kv() :: {pk(), val()}). - --spec(empty/0 :: () -> ?MODULE()). --spec(insert/4 :: (pk(), [sk()], val(), ?MODULE()) -> ?MODULE()). --spec(take/3 :: ([pk()], sk(), ?MODULE()) -> {[kv()], ?MODULE()}). --spec(take/2 :: (sk(), ?MODULE()) -> {[kv()], ?MODULE()}). --spec(take_all/2 :: (sk(), ?MODULE()) -> {[kv()], ?MODULE()}). --spec(drop/2 :: (pk(), ?MODULE()) -> ?MODULE()). --spec(is_defined/2 :: (sk(), ?MODULE()) -> boolean()). --spec(is_empty/1 :: (?MODULE()) -> boolean()). --spec(smallest/1 :: (?MODULE()) -> kv()). --spec(size/1 :: (?MODULE()) -> non_neg_integer()). - --endif. +-opaque ?MODULE() :: {gb_trees:tree(), gb_trees:tree()}. + +-type pk() :: any(). +-type sk() :: any(). +-type val() :: any(). +-type kv() :: {pk(), val()}. + +-spec empty() -> ?MODULE(). +-spec insert(pk(), [sk()], val(), ?MODULE()) -> ?MODULE(). +-spec take([pk()], sk(), ?MODULE()) -> {[kv()], ?MODULE()}. +-spec take(sk(), ?MODULE()) -> {[kv()], ?MODULE()}. +-spec take_all(sk(), ?MODULE()) -> {[kv()], ?MODULE()}. +-spec drop(pk(), ?MODULE()) -> ?MODULE(). +-spec is_defined(sk(), ?MODULE()) -> boolean(). +-spec is_empty(?MODULE()) -> boolean(). +-spec smallest(?MODULE()) -> kv(). +-spec size(?MODULE()) -> non_neg_integer(). 
%%---------------------------------------------------------------------------- diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index 78b009503672..ec2d82186a8c 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -234,58 +234,53 @@ %% Specs %%---------------------------------------------------------------------------- --ifdef(use_specs). - --type(ref() :: any()). --type(ok_or_error() :: 'ok' | {'error', any()}). --type(val_or_error(T) :: {'ok', T} | {'error', any()}). --type(position() :: ('bof' | 'eof' | non_neg_integer() | +-type ref() :: any(). +-type ok_or_error() :: 'ok' | {'error', any()}. +-type val_or_error(T) :: {'ok', T} | {'error', any()}. +-type position() :: ('bof' | 'eof' | non_neg_integer() | {('bof' |'eof'), non_neg_integer()} | - {'cur', integer()})). --type(offset() :: non_neg_integer()). + {'cur', integer()}). +-type offset() :: non_neg_integer(). --spec(register_callback/3 :: (atom(), atom(), [any()]) -> 'ok'). --spec(open/3 :: +-spec register_callback(atom(), atom(), [any()]) -> 'ok'. +-spec open (file:filename(), [any()], [{'write_buffer', (non_neg_integer() | 'infinity' | 'unbuffered')} | - {'read_buffer', (non_neg_integer() | 'unbuffered')}]) - -> val_or_error(ref())). --spec(open_with_absolute_path/3 :: + {'read_buffer', (non_neg_integer() | 'unbuffered')}]) -> + val_or_error(ref()). +-spec open_with_absolute_path (file:filename(), [any()], [{'write_buffer', (non_neg_integer() | 'infinity' | 'unbuffered')} | - {'read_buffer', (non_neg_integer() | 'unbuffered')}]) - -> val_or_error(ref())). --spec(close/1 :: (ref()) -> ok_or_error()). --spec(read/2 :: (ref(), non_neg_integer()) -> - val_or_error([char()] | binary()) | 'eof'). --spec(append/2 :: (ref(), iodata()) -> ok_or_error()). --spec(sync/1 :: (ref()) -> ok_or_error()). --spec(position/2 :: (ref(), position()) -> val_or_error(offset())). --spec(truncate/1 :: (ref()) -> ok_or_error()). 
--spec(current_virtual_offset/1 :: (ref()) -> val_or_error(offset())). --spec(current_raw_offset/1 :: (ref()) -> val_or_error(offset())). --spec(flush/1 :: (ref()) -> ok_or_error()). --spec(copy/3 :: (ref(), ref(), non_neg_integer()) -> - val_or_error(non_neg_integer())). --spec(delete/1 :: (ref()) -> ok_or_error()). --spec(clear/1 :: (ref()) -> ok_or_error()). --spec(set_maximum_since_use/1 :: (non_neg_integer()) -> 'ok'). --spec(obtain/0 :: () -> 'ok'). --spec(obtain/1 :: (non_neg_integer()) -> 'ok'). --spec(release/0 :: () -> 'ok'). --spec(release/1 :: (non_neg_integer()) -> 'ok'). --spec(transfer/1 :: (pid()) -> 'ok'). --spec(transfer/2 :: (pid(), non_neg_integer()) -> 'ok'). --spec(with_handle/1 :: (fun(() -> A)) -> A). --spec(with_handle/2 :: (non_neg_integer(), fun(() -> A)) -> A). --spec(set_limit/1 :: (non_neg_integer()) -> 'ok'). --spec(get_limit/0 :: () -> non_neg_integer()). --spec(info_keys/0 :: () -> rabbit_types:info_keys()). --spec(info/0 :: () -> rabbit_types:infos()). --spec(info/1 :: ([atom()]) -> rabbit_types:infos()). --spec(ulimit/0 :: () -> 'unknown' | non_neg_integer()). - --endif. + {'read_buffer', (non_neg_integer() | 'unbuffered')}]) -> + val_or_error(ref()). +-spec close(ref()) -> ok_or_error(). +-spec read + (ref(), non_neg_integer()) -> val_or_error([char()] | binary()) | 'eof'. +-spec append(ref(), iodata()) -> ok_or_error(). +-spec sync(ref()) -> ok_or_error(). +-spec position(ref(), position()) -> val_or_error(offset()). +-spec truncate(ref()) -> ok_or_error(). +-spec current_virtual_offset(ref()) -> val_or_error(offset()). +-spec current_raw_offset(ref()) -> val_or_error(offset()). +-spec flush(ref()) -> ok_or_error(). +-spec copy(ref(), ref(), non_neg_integer()) -> val_or_error(non_neg_integer()). +-spec delete(ref()) -> ok_or_error(). +-spec clear(ref()) -> ok_or_error(). +-spec set_maximum_since_use(non_neg_integer()) -> 'ok'. +-spec obtain() -> 'ok'. +-spec obtain(non_neg_integer()) -> 'ok'. +-spec release() -> 'ok'. 
+-spec release(non_neg_integer()) -> 'ok'. +-spec transfer(pid()) -> 'ok'. +-spec transfer(pid(), non_neg_integer()) -> 'ok'. +-spec with_handle(fun(() -> A)) -> A. +-spec with_handle(non_neg_integer(), fun(() -> A)) -> A. +-spec set_limit(non_neg_integer()) -> 'ok'. +-spec get_limit() -> non_neg_integer(). +-spec info_keys() -> rabbit_types:info_keys(). +-spec info() -> rabbit_types:infos(). +-spec info([atom()]) -> rabbit_types:infos(). +-spec ulimit() -> 'unknown' | non_neg_integer(). %%---------------------------------------------------------------------------- -define(INFO_KEYS, [total_limit, total_used, sockets_limit, sockets_used]). diff --git a/src/gatherer.erl b/src/gatherer.erl index d3e1a4e4385b..18302699a297 100644 --- a/src/gatherer.erl +++ b/src/gatherer.erl @@ -39,17 +39,13 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). --spec(stop/1 :: (pid()) -> 'ok'). --spec(fork/1 :: (pid()) -> 'ok'). --spec(finish/1 :: (pid()) -> 'ok'). --spec(in/2 :: (pid(), any()) -> 'ok'). --spec(sync_in/2 :: (pid(), any()) -> 'ok'). --spec(out/1 :: (pid()) -> {'value', any()} | 'empty'). - --endif. +-spec start_link() -> rabbit_types:ok_pid_or_error(). +-spec stop(pid()) -> 'ok'. +-spec fork(pid()) -> 'ok'. +-spec finish(pid()) -> 'ok'. +-spec in(pid(), any()) -> 'ok'. +-spec sync_in(pid(), any()) -> 'ok'. +-spec out(pid()) -> {'value', any()} | 'empty'. %%---------------------------------------------------------------------------- diff --git a/src/gm.erl b/src/gm.erl index 1e4168c0e69a..dc47b93a5b6c 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -391,10 +391,6 @@ %% For INSTR_MOD callbacks -export([call/3, cast/2, monitor/1, demonitor/1]). --ifndef(use_specs). --export([behaviour_info/1]). --endif. - -export([table_definitions/0]). -define(GROUP_TABLE, gm_group). @@ -436,22 +432,20 @@ -define(TAG, '$gm'). --ifdef(use_specs). - -export_type([group_name/0]). 
--type(group_name() :: any()). --type(txn_fun() :: fun((fun(() -> any())) -> any())). +-type group_name() :: any(). +-type txn_fun() :: fun((fun(() -> any())) -> any()). --spec(create_tables/0 :: () -> 'ok' | {'aborted', any()}). --spec(start_link/4 :: (group_name(), atom(), any(), txn_fun()) -> - rabbit_types:ok_pid_or_error()). --spec(leave/1 :: (pid()) -> 'ok'). --spec(broadcast/2 :: (pid(), any()) -> 'ok'). --spec(confirmed_broadcast/2 :: (pid(), any()) -> 'ok'). --spec(info/1 :: (pid()) -> rabbit_types:infos()). --spec(validate_members/2 :: (pid(), [pid()]) -> 'ok'). --spec(forget_group/1 :: (group_name()) -> 'ok'). +-spec create_tables() -> 'ok' | {'aborted', any()}. +-spec start_link(group_name(), atom(), any(), txn_fun()) -> + rabbit_types:ok_pid_or_error(). +-spec leave(pid()) -> 'ok'. +-spec broadcast(pid(), any()) -> 'ok'. +-spec confirmed_broadcast(pid(), any()) -> 'ok'. +-spec info(pid()) -> rabbit_types:infos(). +-spec validate_members(pid(), [pid()]) -> 'ok'. +-spec forget_group(group_name()) -> 'ok'. %% The joined, members_changed and handle_msg callbacks can all return %% any of the following terms: @@ -497,15 +491,6 @@ -callback handle_terminate(Args :: term(), Reason :: term()) -> ok | term(). --else. - -behaviour_info(callbacks) -> - [{joined, 2}, {members_changed, 3}, {handle_msg, 3}, {handle_terminate, 2}]; -behaviour_info(_Other) -> - undefined. - --endif. - create_tables() -> create_tables([?TABLE]). diff --git a/src/lqueue.erl b/src/lqueue.erl index 4e78346febe9..1e24e9e75fef 100644 --- a/src/lqueue.erl +++ b/src/lqueue.erl @@ -25,30 +25,26 @@ -define(QUEUE, queue). --ifdef(use_specs). - -export_type([?MODULE/0]). --opaque(?MODULE() :: {non_neg_integer(), ?QUEUE:?QUEUE()}). --type(value() :: any()). --type(result() :: 'empty' | {'value', value()}). - --spec(new/0 :: () -> ?MODULE()). --spec(is_empty/1 :: (?MODULE()) -> boolean()). --spec(len/1 :: (?MODULE()) -> non_neg_integer()). --spec(in/2 :: (value(), ?MODULE()) -> ?MODULE()). 
--spec(in_r/2 :: (value(), ?MODULE()) -> ?MODULE()). --spec(out/1 :: (?MODULE()) -> {result(), ?MODULE()}). --spec(out_r/1 :: (?MODULE()) -> {result(), ?MODULE()}). --spec(join/2 :: (?MODULE(), ?MODULE()) -> ?MODULE()). --spec(foldl/3 :: (fun ((value(), B) -> B), B, ?MODULE()) -> B). --spec(foldr/3 :: (fun ((value(), B) -> B), B, ?MODULE()) -> B). --spec(from_list/1 :: ([value()]) -> ?MODULE()). --spec(to_list/1 :: (?MODULE()) -> [value()]). --spec(peek/1 :: (?MODULE()) -> result()). --spec(peek_r/1 :: (?MODULE()) -> result()). - --endif. +-opaque ?MODULE() :: {non_neg_integer(), ?QUEUE:?QUEUE()}. +-type value() :: any(). +-type result() :: 'empty' | {'value', value()}. + +-spec new() -> ?MODULE(). +-spec is_empty(?MODULE()) -> boolean(). +-spec len(?MODULE()) -> non_neg_integer(). +-spec in(value(), ?MODULE()) -> ?MODULE(). +-spec in_r(value(), ?MODULE()) -> ?MODULE(). +-spec out(?MODULE()) -> {result(), ?MODULE()}. +-spec out_r(?MODULE()) -> {result(), ?MODULE()}. +-spec join(?MODULE(), ?MODULE()) -> ?MODULE(). +-spec foldl(fun ((value(), B) -> B), B, ?MODULE()) -> B. +-spec foldr(fun ((value(), B) -> B), B, ?MODULE()) -> B. +-spec from_list([value()]) -> ?MODULE(). +-spec to_list(?MODULE()) -> [value()]. +-spec peek(?MODULE()) -> result(). +-spec peek_r(?MODULE()) -> result(). new() -> {0, ?QUEUE:new()}. diff --git a/src/mnesia_sync.erl b/src/mnesia_sync.erl index 0b1126eb12e3..8d5c94663556 100644 --- a/src/mnesia_sync.erl +++ b/src/mnesia_sync.erl @@ -34,11 +34,7 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(sync/0 :: () -> 'ok'). - --endif. +-spec sync() -> 'ok'. %%---------------------------------------------------------------------------- diff --git a/src/pg_local.erl b/src/pg_local.erl index 62fae2b077c9..e1f5219dcb0d 100644 --- a/src/pg_local.erl +++ b/src/pg_local.erl @@ -41,20 +41,16 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). 
+-type name() :: term(). --type(name() :: term()). +-spec start_link() -> {'ok', pid()} | {'error', any()}. +-spec start() -> {'ok', pid()} | {'error', any()}. +-spec join(name(), pid()) -> 'ok'. +-spec leave(name(), pid()) -> 'ok'. +-spec get_members(name()) -> [pid()]. +-spec in_group(name(), pid()) -> boolean(). --spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}). --spec(start/0 :: () -> {'ok', pid()} | {'error', any()}). --spec(join/2 :: (name(), pid()) -> 'ok'). --spec(leave/2 :: (name(), pid()) -> 'ok'). --spec(get_members/1 :: (name()) -> [pid()]). --spec(in_group/2 :: (name(), pid()) -> boolean()). - --spec(sync/0 :: () -> 'ok'). - --endif. +-spec sync() -> 'ok'. %%---------------------------------------------------------------------------- diff --git a/src/rabbit.erl b/src/rabbit.erl index 06c84b53b48f..a86fd97925a6 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -198,48 +198,44 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --type(file_suffix() :: binary()). +-type file_suffix() :: binary(). %% this really should be an abstract type --type(log_location() :: 'tty' | 'undefined' | file:filename()). --type(param() :: atom()). --type(app_name() :: atom()). - --spec(start/0 :: () -> 'ok'). --spec(boot/0 :: () -> 'ok'). --spec(stop/0 :: () -> 'ok'). --spec(stop_and_halt/0 :: () -> no_return()). --spec(await_startup/0 :: () -> 'ok'). --spec(status/0 :: +-type log_location() :: 'tty' | 'undefined' | file:filename(). +-type param() :: atom(). +-type app_name() :: atom(). + +-spec start() -> 'ok'. +-spec boot() -> 'ok'. +-spec stop() -> 'ok'. +-spec stop_and_halt() -> no_return(). +-spec await_startup() -> 'ok'. +-spec status () -> [{pid, integer()} | {running_applications, [{atom(), string(), string()}]} | {os, {atom(), atom()}} | {erlang_version, string()} | - {memory, any()}]). --spec(is_running/0 :: () -> boolean()). --spec(is_running/1 :: (node()) -> boolean()). 
--spec(environment/0 :: () -> [{param(), term()}]). --spec(rotate_logs/1 :: (file_suffix()) -> rabbit_types:ok_or_error(any())). --spec(force_event_refresh/1 :: (reference()) -> 'ok'). - --spec(log_location/1 :: ('sasl' | 'kernel') -> log_location()). - --spec(start/2 :: ('normal',[]) -> - {'error', - {'erlang_version_too_old', - {'found',string(),string()}, - {'required',string(),string()}}} | - {'ok',pid()}). --spec(stop/1 :: (_) -> 'ok'). - --spec(maybe_insert_default_data/0 :: () -> 'ok'). --spec(boot_delegate/0 :: () -> 'ok'). --spec(recover/0 :: () -> 'ok'). --spec(start_apps/1 :: ([app_name()]) -> 'ok'). --spec(stop_apps/1 :: ([app_name()]) -> 'ok'). - --endif. + {memory, any()}]. +-spec is_running() -> boolean(). +-spec is_running(node()) -> boolean(). +-spec environment() -> [{param(), term()}]. +-spec rotate_logs(file_suffix()) -> rabbit_types:ok_or_error(any()). +-spec force_event_refresh(reference()) -> 'ok'. + +-spec log_location('sasl' | 'kernel') -> log_location(). + +-spec start('normal',[]) -> + {'error', + {'erlang_version_too_old', + {'found',string(),string()}, + {'required',string(),string()}}} | + {'ok',pid()}. +-spec stop(_) -> 'ok'. + +-spec maybe_insert_default_data() -> 'ok'. +-spec boot_delegate() -> 'ok'. +-spec recover() -> 'ok'. +-spec start_apps([app_name()]) -> 'ok'. +-spec stop_apps([app_name()]) -> 'ok'. %%---------------------------------------------------------------------------- @@ -600,9 +596,8 @@ prep_stop(_State) -> stop(_) -> ok. --ifdef(use_specs). --spec(boot_error/2 :: (term(), not_available | [tuple()]) -> no_return()). --endif. +-spec boot_error(term(), not_available | [tuple()]) -> no_return(). + boot_error({could_not_start, rabbit, {{timeout_waiting_for_tables, _}, _}}, _Stacktrace) -> AllNodes = rabbit_mnesia:cluster_nodes(all), @@ -630,10 +625,9 @@ boot_error(Reason, Stacktrace) -> Args = [Reason, log_location(kernel), log_location(sasl)], boot_error(Reason, Fmt, Args, Stacktrace). --ifdef(use_specs). 
--spec(boot_error/4 :: (term(), string(), [any()], not_available | [tuple()]) - -> no_return()). --endif. +-spec boot_error(term(), string(), [any()], not_available | [tuple()]) -> + no_return(). + boot_error(Reason, Fmt, Args, not_available) -> log_boot_error_and_exit(Reason, Fmt, Args); boot_error(Reason, Fmt, Args, Stacktrace) -> diff --git a/src/rabbit_access_control.erl b/src/rabbit_access_control.erl index dae4d4732ac1..3ae7d7f6906d 100644 --- a/src/rabbit_access_control.erl +++ b/src/rabbit_access_control.erl @@ -23,31 +23,28 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - -export_type([permission_atom/0]). --type(permission_atom() :: 'configure' | 'read' | 'write'). - --spec(check_user_pass_login/2 :: - (rabbit_types:username(), rabbit_types:password()) - -> {'ok', rabbit_types:user()} | - {'refused', rabbit_types:username(), string(), [any()]}). --spec(check_user_login/2 :: - (rabbit_types:username(), [{atom(), any()}]) - -> {'ok', rabbit_types:user()} | - {'refused', rabbit_types:username(), string(), [any()]}). --spec(check_user_loopback/2 :: (rabbit_types:username(), - rabbit_net:socket() | inet:ip_address()) - -> 'ok' | 'not_allowed'). --spec(check_vhost_access/3 :: - (rabbit_types:user(), rabbit_types:vhost(), rabbit_net:socket() | #authz_socket_info{}) - -> 'ok' | rabbit_types:channel_exit()). --spec(check_resource_access/3 :: - (rabbit_types:user(), rabbit_types:r(atom()), permission_atom()) - -> 'ok' | rabbit_types:channel_exit()). - --endif. +-type permission_atom() :: 'configure' | 'read' | 'write'. + +-spec check_user_pass_login + (rabbit_types:username(), rabbit_types:password()) -> + {'ok', rabbit_types:user()} | + {'refused', rabbit_types:username(), string(), [any()]}. +-spec check_user_login + (rabbit_types:username(), [{atom(), any()}]) -> + {'ok', rabbit_types:user()} | + {'refused', rabbit_types:username(), string(), [any()]}. 
+-spec check_user_loopback + (rabbit_types:username(), rabbit_net:socket() | inet:ip_address()) -> + 'ok' | 'not_allowed'. +-spec check_vhost_access + (rabbit_types:user(), rabbit_types:vhost(), + rabbit_net:socket() | #authz_socket_info{}) -> + 'ok' | rabbit_types:channel_exit(). +-spec check_resource_access + (rabbit_types:user(), rabbit_types:r(atom()), permission_atom()) -> + 'ok' | rabbit_types:channel_exit(). %%---------------------------------------------------------------------------- diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 30743ea24376..dd64c6f1c8f8 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -43,32 +43,24 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - -record(alarms, {alertees :: dict:dict(pid(), rabbit_types:mfargs()), alarmed_nodes :: dict:dict(node(), [resource_alarm_source()]), alarms :: [alarm()]}). --type(local_alarm() :: 'file_descriptor_limit'). --type(resource_alarm_source() :: 'disk' | 'memory'). --type(resource_alarm() :: {resource_limit, resource_alarm_source(), node()}). --type(alarm() :: local_alarm() | resource_alarm()). - --spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). --spec(start/0 :: () -> 'ok'). --spec(stop/0 :: () -> 'ok'). --spec(register/2 :: (pid(), rabbit_types:mfargs()) -> [atom()]). --spec(set_alarm/1 :: ({alarm(), []}) -> 'ok'). --spec(clear_alarm/1 :: (alarm()) -> 'ok'). --spec(on_node_up/1 :: (node()) -> 'ok'). --spec(on_node_down/1 :: (node()) -> 'ok'). --spec(get_alarms/0 :: () -> [{alarm(), []}]). - --else. - --record(alarms, {alertees, alarmed_nodes, alarms}). - --endif. +-type local_alarm() :: 'file_descriptor_limit'. +-type resource_alarm_source() :: 'disk' | 'memory'. +-type resource_alarm() :: {resource_limit, resource_alarm_source(), node()}. +-type alarm() :: local_alarm() | resource_alarm(). + +-spec start_link() -> rabbit_types:ok_pid_or_error(). +-spec start() -> 'ok'. +-spec stop() -> 'ok'. 
+-spec register(pid(), rabbit_types:mfargs()) -> [atom()]. +-spec set_alarm({alarm(), []}) -> 'ok'. +-spec clear_alarm(alarm()) -> 'ok'. +-spec on_node_up(node()) -> 'ok'. +-spec on_node_down(node()) -> 'ok'. +-spec get_alarms() -> [{alarm(), []}]. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index cd21aa62b87f..d3cc94bb0e46 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -91,14 +91,11 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(info_keys/0 :: () -> rabbit_types:info_keys()). --spec(init_with_backing_queue_state/7 :: +-spec info_keys() -> rabbit_types:info_keys(). +-spec init_with_backing_queue_state (rabbit_types:amqqueue(), atom(), tuple(), any(), - [rabbit_types:delivery()], pmon:pmon(), dict:dict()) -> #q{}). - --endif. + [rabbit_types:delivery()], pmon:pmon(), dict:dict()) -> + #q{}. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_amqqueue_sup.erl b/src/rabbit_amqqueue_sup.erl index e46ae0f508c6..f1e770aa455a 100644 --- a/src/rabbit_amqqueue_sup.erl +++ b/src/rabbit_amqqueue_sup.erl @@ -26,12 +26,8 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start_link/2 :: (rabbit_types:amqqueue(), rabbit_prequeue:start_mode()) -> - {'ok', pid(), pid()}). - --endif. +-spec start_link(rabbit_types:amqqueue(), rabbit_prequeue:start_mode()) -> + {'ok', pid(), pid()}. 
%%---------------------------------------------------------------------------- diff --git a/src/rabbit_amqqueue_sup_sup.erl b/src/rabbit_amqqueue_sup_sup.erl index 68aabbfe28a2..c57d9334e26c 100644 --- a/src/rabbit_amqqueue_sup_sup.erl +++ b/src/rabbit_amqqueue_sup_sup.erl @@ -28,13 +28,10 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). --spec(start_queue_process/3 :: (node(), rabbit_types:amqqueue(), - 'declare' | 'recovery' | 'slave') -> pid()). - --endif. +-spec start_link() -> rabbit_types:ok_pid_or_error(). +-spec start_queue_process + (node(), rabbit_types:amqqueue(), 'declare' | 'recovery' | 'slave') -> + pid(). %%---------------------------------------------------------------------------- diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 8904c1dd74ae..7cebd194a612 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -29,78 +29,77 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - -export_type([key/0, deletions/0]). --type(key() :: binary()). +-type key() :: binary(). --type(bind_errors() :: rabbit_types:error( +-type bind_errors() :: rabbit_types:error( {'resources_missing', [{'not_found', (rabbit_types:binding_source() | rabbit_types:binding_destination())} | - {'absent', rabbit_types:amqqueue()}]})). + {'absent', rabbit_types:amqqueue()}]}). --type(bind_ok_or_error() :: 'ok' | bind_errors() | +-type bind_ok_or_error() :: 'ok' | bind_errors() | rabbit_types:error( 'binding_not_found' | - {'binding_invalid', string(), [any()]})). --type(bind_res() :: bind_ok_or_error() | rabbit_misc:thunk(bind_ok_or_error())). --type(inner_fun() :: + {'binding_invalid', string(), [any()]}). +-type bind_res() :: bind_ok_or_error() | rabbit_misc:thunk(bind_ok_or_error()). 
+-type inner_fun() :: fun((rabbit_types:exchange(), rabbit_types:exchange() | rabbit_types:amqqueue()) -> - rabbit_types:ok_or_error(rabbit_types:amqp_error()))). --type(bindings() :: [rabbit_types:binding()]). + rabbit_types:ok_or_error(rabbit_types:amqp_error())). +-type bindings() :: [rabbit_types:binding()]. %% TODO this should really be opaque but that seems to confuse 17.1's %% dialyzer into objecting to everything that uses it. --type(deletions() :: dict:dict()). - --spec(recover/2 :: ([rabbit_exchange:name()], [rabbit_amqqueue:name()]) -> - 'ok'). --spec(exists/1 :: (rabbit_types:binding()) -> boolean() | bind_errors()). --spec(add/1 :: (rabbit_types:binding()) -> bind_res()). --spec(add/2 :: (rabbit_types:binding(), inner_fun()) -> bind_res()). --spec(remove/1 :: (rabbit_types:binding()) -> bind_res()). --spec(remove/2 :: (rabbit_types:binding(), inner_fun()) -> bind_res()). --spec(list/1 :: (rabbit_types:vhost()) -> bindings()). --spec(list_for_source/1 :: - (rabbit_types:binding_source()) -> bindings()). --spec(list_for_destination/1 :: - (rabbit_types:binding_destination()) -> bindings()). --spec(list_for_source_and_destination/2 :: +-type deletions() :: dict:dict(). + +-spec recover([rabbit_exchange:name()], [rabbit_amqqueue:name()]) -> + 'ok'. +-spec exists(rabbit_types:binding()) -> boolean() | bind_errors(). +-spec add(rabbit_types:binding()) -> bind_res(). +-spec add(rabbit_types:binding(), inner_fun()) -> bind_res(). +-spec remove(rabbit_types:binding()) -> bind_res(). +-spec remove(rabbit_types:binding(), inner_fun()) -> bind_res(). +-spec list(rabbit_types:vhost()) -> bindings(). +-spec list_for_source + (rabbit_types:binding_source()) -> bindings(). +-spec list_for_destination + (rabbit_types:binding_destination()) -> bindings(). +-spec list_for_source_and_destination (rabbit_types:binding_source(), rabbit_types:binding_destination()) -> - bindings()). --spec(info_keys/0 :: () -> rabbit_types:info_keys()). 
--spec(info/1 :: (rabbit_types:binding()) -> rabbit_types:infos()). --spec(info/2 :: (rabbit_types:binding(), rabbit_types:info_keys()) -> - rabbit_types:infos()). --spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]). --spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys()) - -> [rabbit_types:infos()]). --spec(info_all/4 ::(rabbit_types:vhost(), rabbit_types:info_keys(), - reference(), pid()) -> 'ok'). --spec(has_for_source/1 :: (rabbit_types:binding_source()) -> boolean()). --spec(remove_for_source/1 :: (rabbit_types:binding_source()) -> bindings()). --spec(remove_for_destination/2 :: - (rabbit_types:binding_destination(), boolean()) -> deletions()). --spec(remove_transient_for_destination/1 :: - (rabbit_types:binding_destination()) -> deletions()). --spec(process_deletions/1 :: (deletions()) -> rabbit_misc:thunk('ok')). --spec(combine_deletions/2 :: (deletions(), deletions()) -> deletions()). --spec(add_deletion/3 :: (rabbit_exchange:name(), - {'undefined' | rabbit_types:exchange(), - 'deleted' | 'not_deleted', - bindings()}, deletions()) -> deletions()). --spec(new_deletions/0 :: () -> deletions()). - --endif. + bindings(). +-spec info_keys() -> rabbit_types:info_keys(). +-spec info(rabbit_types:binding()) -> rabbit_types:infos(). +-spec info(rabbit_types:binding(), rabbit_types:info_keys()) -> + rabbit_types:infos(). +-spec info_all(rabbit_types:vhost()) -> [rabbit_types:infos()]. +-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys()) -> + [rabbit_types:infos()]. +-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys(), + reference(), pid()) -> 'ok'. +-spec has_for_source(rabbit_types:binding_source()) -> boolean(). +-spec remove_for_source(rabbit_types:binding_source()) -> bindings(). +-spec remove_for_destination + (rabbit_types:binding_destination(), boolean()) -> deletions(). +-spec remove_transient_for_destination + (rabbit_types:binding_destination()) -> deletions(). 
+-spec process_deletions(deletions()) -> rabbit_misc:thunk('ok'). +-spec combine_deletions(deletions(), deletions()) -> deletions(). +-spec add_deletion + (rabbit_exchange:name(), + {'undefined' | rabbit_types:exchange(), + 'deleted' | 'not_deleted', + bindings()}, + deletions()) -> + deletions(). +-spec new_deletions() -> deletions(). %%---------------------------------------------------------------------------- -define(INFO_KEYS, [source_name, source_kind, destination_name, destination_kind, - routing_key, arguments, + routing_key, arguments, vhost]). recover(XNames, QNames) -> diff --git a/src/rabbit_channel_sup.erl b/src/rabbit_channel_sup.erl index 7aa369b6cac9..48cc1e15c2a2 100644 --- a/src/rabbit_channel_sup.erl +++ b/src/rabbit_channel_sup.erl @@ -36,22 +36,18 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - -export_type([start_link_args/0]). --type(start_link_args() :: +-type start_link_args() :: {'tcp', rabbit_net:socket(), rabbit_channel:channel_number(), non_neg_integer(), pid(), string(), rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(), pid()} | {'direct', rabbit_channel:channel_number(), pid(), string(), rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(), - rabbit_framing:amqp_table(), pid()}). - --spec(start_link/1 :: (start_link_args()) -> {'ok', pid(), {pid(), any()}}). + rabbit_framing:amqp_table(), pid()}. --endif. +-spec start_link(start_link_args()) -> {'ok', pid(), {pid(), any()}}. -define(FAIR_WAIT, 70000). diff --git a/src/rabbit_channel_sup_sup.erl b/src/rabbit_channel_sup_sup.erl index bf483da71341..885d34d0a708 100644 --- a/src/rabbit_channel_sup_sup.erl +++ b/src/rabbit_channel_sup_sup.erl @@ -29,13 +29,9 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). 
--spec(start_channel/2 :: (pid(), rabbit_channel_sup:start_link_args()) -> - {'ok', pid(), {pid(), any()}}). - --endif. +-spec start_link() -> rabbit_types:ok_pid_or_error(). +-spec start_channel(pid(), rabbit_channel_sup:start_link_args()) -> + {'ok', pid(), {pid(), any()}}. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_cli.erl b/src/rabbit_cli.erl index 415150495631..d1229c32f916 100644 --- a/src/rabbit_cli.erl +++ b/src/rabbit_cli.erl @@ -23,32 +23,33 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --type(option_name() :: string()). --type(option_value() :: string() | node() | boolean()). --type(optdef() :: flag | {option, string()}). --type(parse_result() :: {'ok', {atom(), [{option_name(), option_value()}], [string()]}} | - 'no_command'). - --spec(main/3 :: (fun (([string()], string()) -> parse_result()), - fun ((atom(), atom(), [any()], [any()]) -> any()), - atom()) -> no_return()). --spec(start_distribution/0 :: () -> {'ok', pid()} | {'error', any()}). --spec(start_distribution/1 :: (string()) -> {'ok', pid()} | {'error', any()}). --spec(usage/1 :: (atom()) -> no_return()). --spec(parse_arguments/4 :: +-type option_name() :: string(). +-type option_value() :: string() | node() | boolean(). +-type optdef() :: flag | {option, string()}. +-type parse_result() :: {'ok', {atom(), [{option_name(), option_value()}], [string()]}} | + 'no_command'. + +-spec main + (fun (([string()], string()) -> parse_result()), + fun ((atom(), atom(), [any()], [any()]) -> any()), + atom()) -> + no_return(). +-spec start_distribution() -> {'ok', pid()} | {'error', any()}. +-spec start_distribution(string()) -> {'ok', pid()} | {'error', any()}. +-spec usage(atom()) -> no_return(). +-spec parse_arguments ([{atom(), [{string(), optdef()}]} | atom()], - [{string(), optdef()}], string(), [string()]) -> parse_result()). 
- --spec(filter_opts/2 :: ([{option_name(), option_value()}], [option_name()]) -> [boolean()]). + [{string(), optdef()}], string(), [string()]) -> + parse_result(). --spec(rpc_call/4 :: (node(), atom(), atom(), [any()]) -> any()). --spec(rpc_call/5 :: (node(), atom(), atom(), [any()], number()) -> any()). --spec(rpc_call/7 :: (node(), atom(), atom(), [any()], reference(), pid(), - number()) -> any()). +-spec filter_opts([{option_name(), option_value()}], [option_name()]) -> + [boolean()]. --endif. +-spec rpc_call(node(), atom(), atom(), [any()]) -> any(). +-spec rpc_call(node(), atom(), atom(), [any()], number()) -> any(). +-spec rpc_call + (node(), atom(), atom(), [any()], reference(), pid(), number()) -> + any(). ensure_cli_distribution() -> case start_distribution() of diff --git a/src/rabbit_client_sup.erl b/src/rabbit_client_sup.erl index 3779fd1dc46b..77f0bcb99386 100644 --- a/src/rabbit_client_sup.erl +++ b/src/rabbit_client_sup.erl @@ -26,16 +26,12 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start_link/1 :: (rabbit_types:mfargs()) -> - rabbit_types:ok_pid_or_error()). --spec(start_link/2 :: ({'local', atom()}, rabbit_types:mfargs()) -> - rabbit_types:ok_pid_or_error()). --spec(start_link_worker/2 :: ({'local', atom()}, rabbit_types:mfargs()) -> - rabbit_types:ok_pid_or_error()). - --endif. +-spec start_link(rabbit_types:mfargs()) -> + rabbit_types:ok_pid_or_error(). +-spec start_link({'local', atom()}, rabbit_types:mfargs()) -> + rabbit_types:ok_pid_or_error(). +-spec start_link_worker({'local', atom()}, rabbit_types:mfargs()) -> + rabbit_types:ok_pid_or_error(). %%---------------------------------------------------------------------------- @@ -54,4 +50,3 @@ init({M,F,A}) -> init({{M,F,A}, worker}) -> {ok, {{simple_one_for_one, 0, 1}, [{client, {M,F,A}, temporary, ?WORKER_WAIT, worker, [M]}]}}. 
- diff --git a/src/rabbit_connection_helper_sup.erl b/src/rabbit_connection_helper_sup.erl index 14374ac169cb..bde520b74b8c 100644 --- a/src/rabbit_connection_helper_sup.erl +++ b/src/rabbit_connection_helper_sup.erl @@ -37,12 +37,10 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). --spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). --spec(start_channel_sup_sup/1 :: (pid()) -> rabbit_types:ok_pid_or_error()). --spec(start_queue_collector/2 :: (pid(), rabbit_types:proc_name()) -> - rabbit_types:ok_pid_or_error()). --endif. +-spec start_link() -> rabbit_types:ok_pid_or_error(). +-spec start_channel_sup_sup(pid()) -> rabbit_types:ok_pid_or_error(). +-spec start_queue_collector(pid(), rabbit_types:proc_name()) -> + rabbit_types:ok_pid_or_error(). %%---------------------------------------------------------------------------- @@ -65,4 +63,3 @@ start_queue_collector(SupPid, Identity) -> init([]) -> {ok, {{one_for_one, 10, 10}, []}}. - diff --git a/src/rabbit_connection_sup.erl b/src/rabbit_connection_sup.erl index 161f5bfe0613..154bbb192245 100644 --- a/src/rabbit_connection_sup.erl +++ b/src/rabbit_connection_sup.erl @@ -36,12 +36,9 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start_link/4 :: (any(), rabbit_net:socket(), module(), any()) -> {'ok', pid(), pid()}). --spec(reader/1 :: (pid()) -> pid()). - --endif. +-spec start_link(any(), rabbit_net:socket(), module(), any()) -> + {'ok', pid(), pid()}. +-spec reader(pid()) -> pid(). %%-------------------------------------------------------------------------- diff --git a/src/rabbit_control_main.erl b/src/rabbit_control_main.erl index 271ca4484e34..fb3da212872b 100644 --- a/src/rabbit_control_main.erl +++ b/src/rabbit_control_main.erl @@ -124,21 +124,17 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start/0 :: () -> no_return()). 
--spec(stop/0 :: () -> 'ok'). --spec(action/5 :: +-spec start() -> no_return(). +-spec stop() -> 'ok'. +-spec action (atom(), node(), [string()], [{string(), any()}], - fun ((string(), [any()]) -> 'ok')) - -> 'ok'). + fun ((string(), [any()]) -> 'ok')) -> + 'ok'. --spec(action/6 :: +-spec action (atom(), node(), [string()], [{string(), any()}], - fun ((string(), [any()]) -> 'ok'), timeout()) - -> 'ok'). - --endif. + fun ((string(), [any()]) -> 'ok'), timeout()) -> + 'ok'. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_dead_letter.erl b/src/rabbit_dead_letter.erl index 8d426f072a1d..91d23c83a4d6 100644 --- a/src/rabbit_dead_letter.erl +++ b/src/rabbit_dead_letter.erl @@ -23,15 +23,11 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - -type reason() :: 'expired' | 'rejected' | 'maxlen'. -spec publish(rabbit_types:message(), reason(), rabbit_types:exchange(), 'undefined' | binary(), rabbit_amqqueue:name()) -> 'ok'. --endif. - %%---------------------------------------------------------------------------- publish(Msg, Reason, X, RK, QName) -> diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl index b5970274d422..061105c15012 100644 --- a/src/rabbit_direct.erl +++ b/src/rabbit_direct.erl @@ -25,27 +25,25 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(boot/0 :: () -> 'ok'). --spec(force_event_refresh/1 :: (reference()) -> 'ok'). --spec(list/0 :: () -> [pid()]). --spec(list_local/0 :: () -> [pid()]). --spec(connect/5 :: (({'none', 'none'} | {rabbit_types:username(), 'none'} | - {rabbit_types:username(), rabbit_types:password()}), - rabbit_types:vhost(), rabbit_types:protocol(), pid(), - rabbit_event:event_props()) -> - rabbit_types:ok_or_error2( - {rabbit_types:user(), rabbit_framing:amqp_table()}, - 'broker_not_found_on_node' | - {'auth_failure', string()} | 'access_refused')). 
--spec(start_channel/9 ::
+-spec boot() -> 'ok'.
+-spec force_event_refresh(reference()) -> 'ok'.
+-spec list() -> [pid()].
+-spec list_local() -> [pid()].
+-spec connect
+    (({'none', 'none'} | {rabbit_types:username(), 'none'} |
+      {rabbit_types:username(), rabbit_types:password()}),
+     rabbit_types:vhost(), rabbit_types:protocol(), pid(),
+     rabbit_event:event_props()) ->
+          rabbit_types:ok_or_error2(
+              {rabbit_types:user(), rabbit_framing:amqp_table()},
+              'broker_not_found_on_node' |
+              {'auth_failure', string()} | 'access_refused').
+-spec start_channel
   (rabbit_channel:channel_number(), pid(), pid(), string(),
    rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(),
-   rabbit_framing:amqp_table(), pid()) -> {'ok', pid()}).
--spec(disconnect/2 :: (pid(), rabbit_event:event_props()) -> 'ok').
-
--endif.
+   rabbit_framing:amqp_table(), pid()) ->
+     {'ok', pid()}.
+-spec disconnect(pid(), rabbit_event:event_props()) -> 'ok'.

%%----------------------------------------------------------------------------

diff --git a/src/rabbit_disk_monitor.erl b/src/rabbit_disk_monitor.erl
index a56b92b50116..4c1ff0248621 100644
--- a/src/rabbit_disk_monitor.erl
+++ b/src/rabbit_disk_monitor.erl
@@ -1,4 +1,4 @@
-%% The contents of this file are subject to the Mozilla Public License
+%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License
%% at http://www.mozilla.org/MPL/
@@ -70,19 +70,15 @@

%%----------------------------------------------------------------------------

--ifdef(use_specs).
-
--type(disk_free_limit() :: (integer() | string() | {'mem_relative', float()})).
--spec(start_link/1 :: (disk_free_limit()) -> rabbit_types:ok_pid_or_error()).
--spec(get_disk_free_limit/0 :: () -> integer()).
--spec(set_disk_free_limit/1 :: (disk_free_limit()) -> 'ok').
--spec(get_min_check_interval/0 :: () -> integer()).
--spec(set_min_check_interval/1 :: (integer()) -> 'ok'). --spec(get_max_check_interval/0 :: () -> integer()). --spec(set_max_check_interval/1 :: (integer()) -> 'ok'). --spec(get_disk_free/0 :: () -> (integer() | 'unknown')). - --endif. +-type disk_free_limit() :: (integer() | string() | {'mem_relative', float()}). +-spec start_link(disk_free_limit()) -> rabbit_types:ok_pid_or_error(). +-spec get_disk_free_limit() -> integer(). +-spec set_disk_free_limit(disk_free_limit()) -> 'ok'. +-spec get_min_check_interval() -> integer(). +-spec set_min_check_interval(integer()) -> 'ok'. +-spec get_max_check_interval() -> integer(). +-spec set_max_check_interval(integer()) -> 'ok'. +-spec get_disk_free() -> (integer() | 'unknown'). %%---------------------------------------------------------------------------- %% Public API diff --git a/src/rabbit_epmd_monitor.erl b/src/rabbit_epmd_monitor.erl index d95ec49140c4..7f01a7183889 100644 --- a/src/rabbit_epmd_monitor.erl +++ b/src/rabbit_epmd_monitor.erl @@ -30,11 +30,7 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). - --endif. +-spec start_link() -> rabbit_types:ok_pid_or_error(). %%---------------------------------------------------------------------------- %% It's possible for epmd to be killed out from underneath us. If that diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl index 20af0c21a140..5ba3ce7a4f3d 100644 --- a/src/rabbit_error_logger.erl +++ b/src/rabbit_error_logger.erl @@ -31,12 +31,8 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start/0 :: () -> 'ok'). --spec(stop/0 :: () -> 'ok'). - --endif. +-spec start() -> 'ok'. +-spec stop() -> 'ok'. 
%%---------------------------------------------------------------------------- diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 2e9afbfd2e7d..aaea27f91ac9 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -29,78 +29,74 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - -export_type([name/0, type/0]). --type(name() :: rabbit_types:r('exchange')). --type(type() :: atom()). --type(fun_name() :: atom()). +-type name() :: rabbit_types:r('exchange'). +-type type() :: atom(). +-type fun_name() :: atom(). --spec(recover/0 :: () -> [name()]). --spec(callback/4:: +-spec recover() -> [name()]. +-spec callback (rabbit_types:exchange(), fun_name(), - fun((boolean()) -> non_neg_integer()) | atom(), [any()]) -> 'ok'). --spec(policy_changed/2 :: - (rabbit_types:exchange(), rabbit_types:exchange()) -> 'ok'). --spec(declare/6 :: + fun((boolean()) -> non_neg_integer()) | atom(), [any()]) -> 'ok'. +-spec policy_changed + (rabbit_types:exchange(), rabbit_types:exchange()) -> 'ok'. +-spec declare (name(), type(), boolean(), boolean(), boolean(), rabbit_framing:amqp_table()) - -> rabbit_types:exchange()). --spec(check_type/1 :: - (binary()) -> atom() | rabbit_types:connection_exit()). --spec(assert_equivalence/6 :: + -> rabbit_types:exchange(). +-spec check_type + (binary()) -> atom() | rabbit_types:connection_exit(). +-spec assert_equivalence (rabbit_types:exchange(), atom(), boolean(), boolean(), boolean(), rabbit_framing:amqp_table()) - -> 'ok' | rabbit_types:connection_exit()). --spec(assert_args_equivalence/2 :: + -> 'ok' | rabbit_types:connection_exit(). +-spec assert_args_equivalence (rabbit_types:exchange(), rabbit_framing:amqp_table()) - -> 'ok' | rabbit_types:connection_exit()). --spec(lookup/1 :: + -> 'ok' | rabbit_types:connection_exit(). +-spec lookup (name()) -> rabbit_types:ok(rabbit_types:exchange()) | - rabbit_types:error('not_found')). 
--spec(lookup_or_die/1 :: + rabbit_types:error('not_found'). +-spec lookup_or_die (name()) -> rabbit_types:exchange() | - rabbit_types:channel_exit()). --spec(list/0 :: () -> [rabbit_types:exchange()]). --spec(list/1 :: (rabbit_types:vhost()) -> [rabbit_types:exchange()]). --spec(lookup_scratch/2 :: (name(), atom()) -> + rabbit_types:channel_exit(). +-spec list() -> [rabbit_types:exchange()]. +-spec list(rabbit_types:vhost()) -> [rabbit_types:exchange()]. +-spec lookup_scratch(name(), atom()) -> rabbit_types:ok(term()) | - rabbit_types:error('not_found')). --spec(update_scratch/3 :: (name(), atom(), fun((any()) -> any())) -> 'ok'). --spec(update/2 :: + rabbit_types:error('not_found'). +-spec update_scratch(name(), atom(), fun((any()) -> any())) -> 'ok'. +-spec update (name(), fun((rabbit_types:exchange()) -> rabbit_types:exchange())) - -> not_found | rabbit_types:exchange()). --spec(update_decorators/1 :: (name()) -> 'ok'). --spec(immutable/1 :: (rabbit_types:exchange()) -> rabbit_types:exchange()). --spec(info_keys/0 :: () -> rabbit_types:info_keys()). --spec(info/1 :: (rabbit_types:exchange()) -> rabbit_types:infos()). --spec(info/2 :: + -> not_found | rabbit_types:exchange(). +-spec update_decorators(name()) -> 'ok'. +-spec immutable(rabbit_types:exchange()) -> rabbit_types:exchange(). +-spec info_keys() -> rabbit_types:info_keys(). +-spec info(rabbit_types:exchange()) -> rabbit_types:infos(). +-spec info (rabbit_types:exchange(), rabbit_types:info_keys()) - -> rabbit_types:infos()). --spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]). --spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys()) - -> [rabbit_types:infos()]). --spec(info_all/4 ::(rabbit_types:vhost(), rabbit_types:info_keys(), + -> rabbit_types:infos(). +-spec info_all(rabbit_types:vhost()) -> [rabbit_types:infos()]. +-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys()) + -> [rabbit_types:infos()]. 
+-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys(), reference(), pid()) - -> 'ok'). --spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) - -> [rabbit_amqqueue:name()]). --spec(delete/2 :: + -> 'ok'. +-spec route(rabbit_types:exchange(), rabbit_types:delivery()) + -> [rabbit_amqqueue:name()]. +-spec delete (name(), 'true') -> 'ok' | rabbit_types:error('not_found' | 'in_use'); - (name(), 'false') -> 'ok' | rabbit_types:error('not_found')). --spec(validate_binding/2 :: + (name(), 'false') -> 'ok' | rabbit_types:error('not_found'). +-spec validate_binding (rabbit_types:exchange(), rabbit_types:binding()) - -> rabbit_types:ok_or_error({'binding_invalid', string(), [any()]})). --spec(maybe_auto_delete/2:: + -> rabbit_types:ok_or_error({'binding_invalid', string(), [any()]}). +-spec maybe_auto_delete (rabbit_types:exchange(), boolean()) - -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). --spec(serial/1 :: (rabbit_types:exchange()) -> - fun((boolean()) -> 'none' | pos_integer())). --spec(peek_serial/1 :: (name()) -> pos_integer() | 'undefined'). - --endif. + -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}. +-spec serial(rabbit_types:exchange()) -> + fun((boolean()) -> 'none' | pos_integer()). +-spec peek_serial(name()) -> pos_integer() | 'undefined'. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index 444d507c7e59..196873aa22db 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -32,10 +32,9 @@ {requires, rabbit_registry}, {enables, kernel_ready}]}). --ifdef(use_specs). --spec(headers_match/2 :: (rabbit_framing:amqp_table(), - rabbit_framing:amqp_table()) -> boolean()). --endif. +-spec headers_match + (rabbit_framing:amqp_table(), rabbit_framing:amqp_table()) -> + boolean(). 
description() -> [{description, <<"AMQP headers exchange, as per the AMQP specification">>}]. diff --git a/src/rabbit_exchange_type_invalid.erl b/src/rabbit_exchange_type_invalid.erl index c8ca7ecae41c..2510c8a241c3 100644 --- a/src/rabbit_exchange_type_invalid.erl +++ b/src/rabbit_exchange_type_invalid.erl @@ -31,10 +31,8 @@ description() -> serialise_events() -> false. --ifdef(use_specs). --spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) - -> no_return()). --endif. +-spec route(rabbit_types:exchange(), rabbit_types:delivery()) -> no_return(). + route(#exchange{name = Name, type = Type}, _) -> rabbit_misc:protocol_error( precondition_failed, diff --git a/src/rabbit_file.erl b/src/rabbit_file.erl index 1088f2c2dd0c..878b9da7a73b 100644 --- a/src/rabbit_file.erl +++ b/src/rabbit_file.erl @@ -31,37 +31,30 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --type(ok_or_error() :: rabbit_types:ok_or_error(any())). - --spec(is_file/1 :: ((file:filename())) -> boolean()). --spec(is_dir/1 :: ((file:filename())) -> boolean()). --spec(file_size/1 :: ((file:filename())) -> non_neg_integer()). --spec(ensure_dir/1 :: ((file:filename())) -> ok_or_error()). --spec(wildcard/2 :: (string(), file:filename()) -> [file:filename()]). --spec(list_dir/1 :: (file:filename()) -> rabbit_types:ok_or_error2( - [file:filename()], any())). --spec(read_term_file/1 :: - (file:filename()) -> {'ok', [any()]} | rabbit_types:error(any())). --spec(write_term_file/2 :: (file:filename(), [any()]) -> ok_or_error()). --spec(write_file/2 :: (file:filename(), iodata()) -> ok_or_error()). --spec(write_file/3 :: (file:filename(), iodata(), [any()]) -> ok_or_error()). --spec(append_file/2 :: (file:filename(), string()) -> ok_or_error()). --spec(ensure_parent_dirs_exist/1 :: (string()) -> 'ok'). --spec(rename/2 :: - (file:filename(), file:filename()) -> ok_or_error()). --spec(delete/1 :: ([file:filename()]) -> ok_or_error()). 
--spec(recursive_delete/1 :: - ([file:filename()]) - -> rabbit_types:ok_or_error({file:filename(), any()})). --spec(recursive_copy/2 :: - (file:filename(), file:filename()) - -> rabbit_types:ok_or_error({file:filename(), file:filename(), any()})). --spec(lock_file/1 :: (file:filename()) -> rabbit_types:ok_or_error('eexist')). --spec(filename_as_a_directory/1 :: (file:filename()) -> file:filename()). - --endif. +-type ok_or_error() :: rabbit_types:ok_or_error(any()). + +-spec is_file((file:filename())) -> boolean(). +-spec is_dir((file:filename())) -> boolean(). +-spec file_size((file:filename())) -> non_neg_integer(). +-spec ensure_dir((file:filename())) -> ok_or_error(). +-spec wildcard(string(), file:filename()) -> [file:filename()]. +-spec list_dir(file:filename()) -> + rabbit_types:ok_or_error2([file:filename()], any()). +-spec read_term_file + (file:filename()) -> {'ok', [any()]} | rabbit_types:error(any()). +-spec write_term_file(file:filename(), [any()]) -> ok_or_error(). +-spec write_file(file:filename(), iodata()) -> ok_or_error(). +-spec write_file(file:filename(), iodata(), [any()]) -> ok_or_error(). +-spec append_file(file:filename(), string()) -> ok_or_error(). +-spec ensure_parent_dirs_exist(string()) -> 'ok'. +-spec rename(file:filename(), file:filename()) -> ok_or_error(). +-spec delete([file:filename()]) -> ok_or_error(). +-spec recursive_delete([file:filename()]) -> + rabbit_types:ok_or_error({file:filename(), any()}). +-spec recursive_copy(file:filename(), file:filename()) -> + rabbit_types:ok_or_error({file:filename(), file:filename(), any()}). +-spec lock_file(file:filename()) -> rabbit_types:ok_or_error('eexist'). +-spec filename_as_a_directory(file:filename()) -> file:filename(). 
%%---------------------------------------------------------------------------- diff --git a/src/rabbit_framing.erl b/src/rabbit_framing.erl index e5b54dc4e003..e4a5013003f2 100644 --- a/src/rabbit_framing.erl +++ b/src/rabbit_framing.erl @@ -18,8 +18,6 @@ -module(rabbit_framing). --ifdef(use_specs). - -export_type([protocol/0, amqp_field_type/0, amqp_property_type/0, amqp_table/0, amqp_array/0, amqp_value/0, @@ -27,7 +25,7 @@ amqp_method_field_name/0, amqp_property_record/0, amqp_exception/0, amqp_exception_code/0, amqp_class_id/0]). --type(protocol() :: 'rabbit_framing_amqp_0_8' | 'rabbit_framing_amqp_0_9_1'). +-type protocol() :: 'rabbit_framing_amqp_0_8' | 'rabbit_framing_amqp_0_9_1'. -define(protocol_type(T), type(T :: rabbit_framing_amqp_0_8:T | rabbit_framing_amqp_0_9_1:T)). @@ -45,5 +43,3 @@ -?protocol_type(amqp_exception()). -?protocol_type(amqp_exception_code()). -?protocol_type(amqp_class_id()). - --endif. diff --git a/src/rabbit_guid.erl b/src/rabbit_guid.erl index 4d2b450409be..75f9df7b3fae 100644 --- a/src/rabbit_guid.erl +++ b/src/rabbit_guid.erl @@ -32,20 +32,16 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - -export_type([guid/0]). --type(guid() :: binary()). - --spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). --spec(filename/0 :: () -> string()). --spec(gen/0 :: () -> guid()). --spec(gen_secure/0 :: () -> guid()). --spec(string/2 :: (guid(), any()) -> string()). --spec(binary/2 :: (guid(), any()) -> binary()). +-type guid() :: binary(). --endif. +-spec start_link() -> rabbit_types:ok_pid_or_error(). +-spec filename() -> string(). +-spec gen() -> guid(). +-spec gen_secure() -> guid(). +-spec string(guid(), any()) -> string(). +-spec binary(guid(), any()) -> binary(). 
%%---------------------------------------------------------------------------- diff --git a/src/rabbit_limiter.erl b/src/rabbit_limiter.erl index 0a2b8c5fc675..94620416d69a 100644 --- a/src/rabbit_limiter.erl +++ b/src/rabbit_limiter.erl @@ -140,44 +140,42 @@ -record(lstate, {pid, prefetch_limited}). -record(qstate, {pid, state, credits}). --ifdef(use_specs). - --type(lstate() :: #lstate{pid :: pid(), - prefetch_limited :: boolean()}). --type(qstate() :: #qstate{pid :: pid(), - state :: 'dormant' | 'active' | 'suspended'}). - --type(credit_mode() :: 'manual' | 'drain' | 'auto'). - --spec(start_link/1 :: (rabbit_types:proc_name()) -> - rabbit_types:ok_pid_or_error()). --spec(new/1 :: (pid()) -> lstate()). - --spec(limit_prefetch/3 :: (lstate(), non_neg_integer(), non_neg_integer()) - -> lstate()). --spec(unlimit_prefetch/1 :: (lstate()) -> lstate()). --spec(is_active/1 :: (lstate()) -> boolean()). --spec(get_prefetch_limit/1 :: (lstate()) -> non_neg_integer()). --spec(ack/2 :: (lstate(), non_neg_integer()) -> 'ok'). --spec(pid/1 :: (lstate()) -> pid()). - --spec(client/1 :: (pid()) -> qstate()). --spec(activate/1 :: (qstate()) -> qstate()). --spec(can_send/3 :: (qstate(), boolean(), rabbit_types:ctag()) -> - {'continue' | 'suspend', qstate()}). --spec(resume/1 :: (qstate()) -> qstate()). --spec(deactivate/1 :: (qstate()) -> qstate()). --spec(is_suspended/1 :: (qstate()) -> boolean()). --spec(is_consumer_blocked/2 :: (qstate(), rabbit_types:ctag()) -> boolean()). --spec(credit/5 :: (qstate(), rabbit_types:ctag(), non_neg_integer(), - credit_mode(), boolean()) -> {boolean(), qstate()}). --spec(ack_from_queue/3 :: (qstate(), rabbit_types:ctag(), non_neg_integer()) - -> {boolean(), qstate()}). --spec(drained/1 :: (qstate()) - -> {[{rabbit_types:ctag(), non_neg_integer()}], qstate()}). --spec(forget_consumer/2 :: (qstate(), rabbit_types:ctag()) -> qstate()). - --endif. +-type lstate() :: #lstate{pid :: pid(), + prefetch_limited :: boolean()}. 
+-type qstate() :: #qstate{pid :: pid(), + state :: 'dormant' | 'active' | 'suspended'}. + +-type credit_mode() :: 'manual' | 'drain' | 'auto'. + +-spec start_link(rabbit_types:proc_name()) -> + rabbit_types:ok_pid_or_error(). +-spec new(pid()) -> lstate(). + +-spec limit_prefetch(lstate(), non_neg_integer(), non_neg_integer()) -> + lstate(). +-spec unlimit_prefetch(lstate()) -> lstate(). +-spec is_active(lstate()) -> boolean(). +-spec get_prefetch_limit(lstate()) -> non_neg_integer(). +-spec ack(lstate(), non_neg_integer()) -> 'ok'. +-spec pid(lstate()) -> pid(). + +-spec client(pid()) -> qstate(). +-spec activate(qstate()) -> qstate(). +-spec can_send(qstate(), boolean(), rabbit_types:ctag()) -> + {'continue' | 'suspend', qstate()}. +-spec resume(qstate()) -> qstate(). +-spec deactivate(qstate()) -> qstate(). +-spec is_suspended(qstate()) -> boolean(). +-spec is_consumer_blocked(qstate(), rabbit_types:ctag()) -> boolean(). +-spec credit + (qstate(), rabbit_types:ctag(), non_neg_integer(), credit_mode(), + boolean()) -> + {boolean(), qstate()}. +-spec ack_from_queue(qstate(), rabbit_types:ctag(), non_neg_integer()) -> + {boolean(), qstate()}. +-spec drained(qstate()) -> + {[{rabbit_types:ctag(), non_neg_integer()}], qstate()}. +-spec forget_consumer(qstate(), rabbit_types:ctag()) -> qstate(). %%---------------------------------------------------------------------------- diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index 5b5468cab749..337fb23f840b 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -22,28 +22,24 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - -export_type([level/0]). --type(category() :: atom()). --type(level() :: 'debug' | 'info' | 'warning' | 'error'). - --spec(log/3 :: (category(), level(), string()) -> 'ok'). --spec(log/4 :: (category(), level(), string(), [any()]) -> 'ok'). +-type category() :: atom(). +-type level() :: 'debug' | 'info' | 'warning' | 'error'. 
--spec(debug/1 :: (string()) -> 'ok'). --spec(debug/2 :: (string(), [any()]) -> 'ok'). --spec(info/1 :: (string()) -> 'ok'). --spec(info/2 :: (string(), [any()]) -> 'ok'). --spec(warning/1 :: (string()) -> 'ok'). --spec(warning/2 :: (string(), [any()]) -> 'ok'). --spec(error/1 :: (string()) -> 'ok'). --spec(error/2 :: (string(), [any()]) -> 'ok'). +-spec log(category(), level(), string()) -> 'ok'. +-spec log(category(), level(), string(), [any()]) -> 'ok'. --spec(with_local_io/1 :: (fun (() -> A)) -> A). +-spec debug(string()) -> 'ok'. +-spec debug(string(), [any()]) -> 'ok'. +-spec info(string()) -> 'ok'. +-spec info(string(), [any()]) -> 'ok'. +-spec warning(string()) -> 'ok'. +-spec warning(string(), [any()]) -> 'ok'. +-spec error(string()) -> 'ok'. +-spec error(string(), [any()]) -> 'ok'. --endif. +-spec with_local_io(fun (() -> A)) -> A. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_memory_monitor.erl b/src/rabbit_memory_monitor.erl index 5e515bfb03fb..6fd12b30ff74 100644 --- a/src/rabbit_memory_monitor.erl +++ b/src/rabbit_memory_monitor.erl @@ -55,16 +55,12 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). --spec(register/2 :: (pid(), {atom(),atom(),[any()]}) -> 'ok'). --spec(deregister/1 :: (pid()) -> 'ok'). --spec(report_ram_duration/2 :: - (pid(), float() | 'infinity') -> number() | 'infinity'). --spec(stop/0 :: () -> 'ok'). - --endif. +-spec start_link() -> rabbit_types:ok_pid_or_error(). +-spec register(pid(), {atom(),atom(),[any()]}) -> 'ok'. +-spec deregister(pid()) -> 'ok'. +-spec report_ram_duration + (pid(), float() | 'infinity') -> number() | 'infinity'. +-spec stop() -> 'ok'. 
%%---------------------------------------------------------------------------- %% Public API diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index 1679767286e7..221f11f18a01 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -36,16 +36,13 @@ depth_fun }). --ifdef(use_specs). - --spec(start_link/4 :: (rabbit_types:amqqueue(), pid() | 'undefined', - rabbit_mirror_queue_master:death_fun(), - rabbit_mirror_queue_master:depth_fun()) -> - rabbit_types:ok_pid_or_error()). --spec(get_gm/1 :: (pid()) -> pid()). --spec(ensure_monitoring/2 :: (pid(), [pid()]) -> 'ok'). - --endif. +-spec start_link + (rabbit_types:amqqueue(), pid() | 'undefined', + rabbit_mirror_queue_master:death_fun(), + rabbit_mirror_queue_master:depth_fun()) -> + rabbit_types:ok_pid_or_error(). +-spec get_gm(pid()) -> pid(). +-spec ensure_monitoring(pid(), [pid()]) -> 'ok'. %%---------------------------------------------------------------------------- %% diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 9674a4ef2c40..d081b15bc76c 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -47,14 +47,12 @@ wait_timeout }). --ifdef(use_specs). - -export_type([death_fun/0, depth_fun/0, stats_fun/0]). --type(death_fun() :: fun ((pid()) -> 'ok')). --type(depth_fun() :: fun (() -> 'ok')). --type(stats_fun() :: fun ((any()) -> 'ok')). --type(master_state() :: #state { name :: rabbit_amqqueue:name(), +-type death_fun() :: fun ((pid()) -> 'ok'). +-type depth_fun() :: fun (() -> 'ok'). +-type stats_fun() :: fun ((any()) -> 'ok'). +-type master_state() :: #state { name :: rabbit_amqqueue:name(), gm :: pid(), coordinator :: pid(), backing_queue :: atom(), @@ -62,20 +60,19 @@ seen_status :: dict:dict(), confirmed :: [rabbit_guid:guid()], known_senders :: sets:set() - }). + }. 
--spec(promote_backing_queue_state/8 :: +-spec promote_backing_queue_state (rabbit_amqqueue:name(), pid(), atom(), any(), pid(), [any()], - dict:dict(), [pid()]) -> master_state()). --spec(sender_death_fun/0 :: () -> death_fun()). --spec(depth_fun/0 :: () -> depth_fun()). --spec(init_with_existing_bq/3 :: (rabbit_types:amqqueue(), atom(), any()) -> - master_state()). --spec(stop_mirroring/1 :: (master_state()) -> {atom(), any()}). --spec(sync_mirrors/3 :: (stats_fun(), stats_fun(), master_state()) -> - {'ok', master_state()} | {stop, any(), master_state()}). - --endif. + dict:dict(), [pid()]) -> + master_state(). +-spec sender_death_fun() -> death_fun(). +-spec depth_fun() -> depth_fun(). +-spec init_with_existing_bq(rabbit_types:amqqueue(), atom(), any()) -> + master_state(). +-spec stop_mirroring(master_state()) -> {atom(), any()}. +-spec sync_mirrors(stats_fun(), stats_fun(), master_state()) -> + {'ok', master_state()} | {stop, any(), master_state()}. %% For general documentation of HA design, see %% rabbit_mirror_queue_coordinator diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl index fad20711aa51..83350920e6c9 100644 --- a/src/rabbit_mirror_queue_misc.erl +++ b/src/rabbit_mirror_queue_misc.erl @@ -50,28 +50,24 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(remove_from_queue/3 :: - (rabbit_amqqueue:name(), pid(), [pid()]) - -> {'ok', pid(), [pid()], [node()]} | {'error', 'not_found'}). --spec(on_node_up/0 :: () -> 'ok'). --spec(add_mirrors/3 :: (rabbit_amqqueue:name(), [node()], 'sync' | 'async') - -> 'ok'). --spec(store_updated_slaves/1 :: (rabbit_types:amqqueue()) -> - rabbit_types:amqqueue()). --spec(initial_queue_node/2 :: (rabbit_types:amqqueue(), node()) -> node()). --spec(suggested_queue_nodes/1 :: (rabbit_types:amqqueue()) -> - {node(), [node()]}). --spec(is_mirrored/1 :: (rabbit_types:amqqueue()) -> boolean()). 
--spec(update_mirrors/2 :: - (rabbit_types:amqqueue(), rabbit_types:amqqueue()) -> 'ok'). --spec(maybe_drop_master_after_sync/1 :: (rabbit_types:amqqueue()) -> 'ok'). --spec(maybe_auto_sync/1 :: (rabbit_types:amqqueue()) -> 'ok'). --spec(log_info/3 :: (rabbit_amqqueue:name(), string(), [any()]) -> 'ok'). --spec(log_warning/3 :: (rabbit_amqqueue:name(), string(), [any()]) -> 'ok'). - --endif. +-spec remove_from_queue + (rabbit_amqqueue:name(), pid(), [pid()]) -> + {'ok', pid(), [pid()], [node()]} | {'error', 'not_found'}. +-spec on_node_up() -> 'ok'. +-spec add_mirrors(rabbit_amqqueue:name(), [node()], 'sync' | 'async') -> + 'ok'. +-spec store_updated_slaves(rabbit_types:amqqueue()) -> + rabbit_types:amqqueue(). +-spec initial_queue_node(rabbit_types:amqqueue(), node()) -> node(). +-spec suggested_queue_nodes(rabbit_types:amqqueue()) -> + {node(), [node()]}. +-spec is_mirrored(rabbit_types:amqqueue()) -> boolean(). +-spec update_mirrors + (rabbit_types:amqqueue(), rabbit_types:amqqueue()) -> 'ok'. +-spec maybe_drop_master_after_sync(rabbit_types:amqqueue()) -> 'ok'. +-spec maybe_auto_sync(rabbit_types:amqqueue()) -> 'ok'. +-spec log_info(rabbit_amqqueue:name(), string(), [any()]) -> 'ok'. +-spec log_warning(rabbit_amqqueue:name(), string(), [any()]) -> 'ok'. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_mirror_queue_mode.erl b/src/rabbit_mirror_queue_mode.erl index 5bb243746a09..3733c7f0f8df 100644 --- a/src/rabbit_mirror_queue_mode.erl +++ b/src/rabbit_mirror_queue_mode.erl @@ -16,11 +16,9 @@ -module(rabbit_mirror_queue_mode). --ifdef(use_specs). - --type(master() :: node()). --type(slave() :: node()). --type(params() :: any()). +-type master() :: node(). +-type slave() :: node(). +-type params() :: any(). -callback description() -> [proplists:property()]. @@ -44,14 +42,3 @@ %% Are the parameters valid for this mode? -callback validate_policy(params()) -> rabbit_policy_validator:validate_results(). - --else. 
- --export([behaviour_info/1]). - -behaviour_info(callbacks) -> - [{description, 0}, {suggested_queue_nodes, 5}, {validate_policy, 1}]; -behaviour_info(_Other) -> - undefined. - --endif. diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index c04c82f45ea7..6f46cdc69881 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -542,9 +542,8 @@ confirm_messages(MsgIds, State = #state { msg_id_status = MS }) -> handle_process_result({ok, State}) -> noreply(State); handle_process_result({stop, State}) -> {stop, normal, State}. --ifdef(use_specs). --spec(promote_me/2 :: ({pid(), term()}, #state{}) -> no_return()). --endif. +-spec promote_me({pid(), term()}, #state{}) -> no_return(). + promote_me(From, #state { q = Q = #amqqueue { name = QName }, gm = GM, backing_queue = BQ, diff --git a/src/rabbit_mirror_queue_sync.erl b/src/rabbit_mirror_queue_sync.erl index a97a9b50c86a..54f0855fce44 100644 --- a/src/rabbit_mirror_queue_sync.erl +++ b/src/rabbit_mirror_queue_sync.erl @@ -52,32 +52,28 @@ %% || || -- sync_complete --> || %% || (Dies) || --ifdef(use_specs). - --type(log_fun() :: fun ((string(), [any()]) -> 'ok')). --type(bq() :: atom()). --type(bqs() :: any()). --type(ack() :: any()). --type(slave_sync_state() :: {[{rabbit_types:msg_id(), ack()}], timer:tref(), - bqs()}). - --spec(master_prepare/4 :: (reference(), rabbit_amqqueue:name(), - log_fun(), [pid()]) -> pid()). --spec(master_go/8 :: (pid(), reference(), log_fun(), +-type log_fun() :: fun ((string(), [any()]) -> 'ok'). +-type bq() :: atom(). +-type bqs() :: any(). +-type ack() :: any(). +-type slave_sync_state() :: {[{rabbit_types:msg_id(), ack()}], timer:tref(), + bqs()}. + +-spec master_prepare(reference(), rabbit_amqqueue:name(), + log_fun(), [pid()]) -> pid(). 
+-spec master_go(pid(), reference(), log_fun(), rabbit_mirror_queue_master:stats_fun(), rabbit_mirror_queue_master:stats_fun(), non_neg_integer(), bq(), bqs()) -> {'already_synced', bqs()} | {'ok', bqs()} | {'shutdown', any(), bqs()} | - {'sync_died', any(), bqs()}). --spec(slave/7 :: (non_neg_integer(), reference(), timer:tref(), pid(), + {'sync_died', any(), bqs()}. +-spec slave(non_neg_integer(), reference(), timer:tref(), pid(), bq(), bqs(), fun((bq(), bqs()) -> {timer:tref(), bqs()})) -> 'denied' | {'ok' | 'failed', slave_sync_state()} | - {'stop', any(), slave_sync_state()}). - --endif. + {'stop', any(), slave_sync_state()}. %% --------------------------------------------------------------------------- %% Master diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 6a57f6bb2cd4..c327b33e5deb 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -50,48 +50,44 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - -export_type([node_type/0, cluster_status/0]). --type(node_type() :: disc | ram). --type(cluster_status() :: {[node()], [node()], [node()]}). +-type node_type() :: disc | ram. +-type cluster_status() :: {[node()], [node()], [node()]}. %% Main interface --spec(init/0 :: () -> 'ok'). --spec(join_cluster/2 :: (node(), node_type()) - -> 'ok' | {'ok', 'already_member'}). --spec(reset/0 :: () -> 'ok'). --spec(force_reset/0 :: () -> 'ok'). --spec(update_cluster_nodes/1 :: (node()) -> 'ok'). --spec(change_cluster_node_type/1 :: (node_type()) -> 'ok'). --spec(forget_cluster_node/2 :: (node(), boolean()) -> 'ok'). --spec(force_load_next_boot/0 :: () -> 'ok'). +-spec init() -> 'ok'. +-spec join_cluster(node(), node_type()) + -> 'ok' | {'ok', 'already_member'}. +-spec reset() -> 'ok'. +-spec force_reset() -> 'ok'. +-spec update_cluster_nodes(node()) -> 'ok'. +-spec change_cluster_node_type(node_type()) -> 'ok'. +-spec forget_cluster_node(node(), boolean()) -> 'ok'. 
+-spec force_load_next_boot() -> 'ok'. %% Various queries to get the status of the db --spec(status/0 :: () -> [{'nodes', [{node_type(), [node()]}]} | +-spec status() -> [{'nodes', [{node_type(), [node()]}]} | {'running_nodes', [node()]} | - {'partitions', [{node(), [node()]}]}]). --spec(is_clustered/0 :: () -> boolean()). --spec(on_running_node/1 :: (pid()) -> boolean()). --spec(is_process_alive/1 :: (pid()) -> boolean()). --spec(cluster_nodes/1 :: ('all' | 'disc' | 'ram' | 'running') -> [node()]). --spec(node_type/0 :: () -> node_type()). --spec(dir/0 :: () -> file:filename()). --spec(cluster_status_from_mnesia/0 :: () -> rabbit_types:ok_or_error2( - cluster_status(), any())). + {'partitions', [{node(), [node()]}]}]. +-spec is_clustered() -> boolean(). +-spec on_running_node(pid()) -> boolean(). +-spec is_process_alive(pid()) -> boolean(). +-spec cluster_nodes('all' | 'disc' | 'ram' | 'running') -> [node()]. +-spec node_type() -> node_type(). +-spec dir() -> file:filename(). +-spec cluster_status_from_mnesia() -> rabbit_types:ok_or_error2( + cluster_status(), any()). %% Operations on the db and utils, mainly used in `rabbit_upgrade' and `rabbit' --spec(init_db_unchecked/2 :: ([node()], node_type()) -> 'ok'). --spec(copy_db/1 :: (file:filename()) -> rabbit_types:ok_or_error(any())). --spec(check_cluster_consistency/0 :: () -> 'ok'). --spec(ensure_mnesia_dir/0 :: () -> 'ok'). +-spec init_db_unchecked([node()], node_type()) -> 'ok'. +-spec copy_db(file:filename()) -> rabbit_types:ok_or_error(any()). +-spec check_cluster_consistency() -> 'ok'. +-spec ensure_mnesia_dir() -> 'ok'. %% Hooks used in `rabbit_node_monitor' --spec(on_node_up/1 :: (node()) -> 'ok'). --spec(on_node_down/1 :: (node()) -> 'ok'). - --endif. +-spec on_node_up(node()) -> 'ok'. +-spec on_node_down(node()) -> 'ok'. 
%%---------------------------------------------------------------------------- %% Main interface diff --git a/src/rabbit_mnesia_rename.erl b/src/rabbit_mnesia_rename.erl index 1ece103cec8a..0945e3152234 100644 --- a/src/rabbit_mnesia_rename.erl +++ b/src/rabbit_mnesia_rename.erl @@ -43,12 +43,8 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(rename/2 :: (node(), [{node(), node()}]) -> 'ok'). --spec(maybe_finish/1 :: ([node()]) -> 'ok'). - --endif. +-spec rename(node(), [{node(), node()}]) -> 'ok'. +-spec maybe_finish([node()]) -> 'ok'. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index 09a2407bec87..5c0acc5ffd90 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -33,25 +33,21 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --type(io_device() :: any()). --type(position() :: non_neg_integer()). --type(msg_size() :: non_neg_integer()). --type(file_size() :: non_neg_integer()). --type(message_accumulator(A) :: +-type io_device() :: any(). +-type position() :: non_neg_integer(). +-type msg_size() :: non_neg_integer(). +-type file_size() :: non_neg_integer(). +-type message_accumulator(A) :: fun (({rabbit_types:msg_id(), msg_size(), position(), binary()}, A) -> - A)). - --spec(append/3 :: (io_device(), rabbit_types:msg_id(), msg()) -> - rabbit_types:ok_or_error2(msg_size(), any())). --spec(read/2 :: (io_device(), msg_size()) -> - rabbit_types:ok_or_error2({rabbit_types:msg_id(), msg()}, - any())). --spec(scan/4 :: (io_device(), file_size(), message_accumulator(A), A) -> - {'ok', A, position()}). - --endif. + A). + +-spec append(io_device(), rabbit_types:msg_id(), msg()) -> + rabbit_types:ok_or_error2(msg_size(), any()). +-spec read(io_device(), msg_size()) -> + rabbit_types:ok_or_error2({rabbit_types:msg_id(), msg()}, + any()). 
+-spec scan(io_device(), file_size(), message_accumulator(A), A) -> + {'ok', A, position()}. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 230066468734..2a6e2cd1037e 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -133,22 +133,20 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - -export_type([gc_state/0, file_num/0]). --type(gc_state() :: #gc_state { dir :: file:filename(), +-type gc_state() :: #gc_state { dir :: file:filename(), index_module :: atom(), index_state :: any(), file_summary_ets :: ets:tid(), file_handles_ets :: ets:tid(), msg_store :: server() - }). + }. --type(server() :: pid() | atom()). --type(client_ref() :: binary()). --type(file_num() :: non_neg_integer()). --type(client_msstate() :: #client_msstate { +-type server() :: pid() | atom(). +-type client_ref() :: binary(). +-type file_num() :: non_neg_integer(). +-type client_msstate() :: #client_msstate { server :: server(), client_ref :: client_ref(), file_handle_cache :: dict:dict(), @@ -160,43 +158,41 @@ file_summary_ets :: ets:tid(), cur_file_cache_ets :: ets:tid(), flying_ets :: ets:tid(), - credit_disc_bound :: {pos_integer(), pos_integer()}}). --type(msg_ref_delta_gen(A) :: + credit_disc_bound :: {pos_integer(), pos_integer()}}. +-type msg_ref_delta_gen(A) :: fun ((A) -> 'finished' | - {rabbit_types:msg_id(), non_neg_integer(), A})). --type(maybe_msg_id_fun() :: - 'undefined' | fun ((gb_sets:set(), 'written' | 'ignored') -> any())). --type(maybe_close_fds_fun() :: 'undefined' | fun (() -> 'ok')). --type(deletion_thunk() :: fun (() -> boolean())). + {rabbit_types:msg_id(), non_neg_integer(), A}). +-type maybe_msg_id_fun() :: + 'undefined' | fun ((gb_sets:set(), 'written' | 'ignored') -> any()). +-type maybe_close_fds_fun() :: 'undefined' | fun (() -> 'ok'). +-type deletion_thunk() :: fun (() -> boolean()). 
--spec(start_link/4 :: +-spec start_link (atom(), file:filename(), [binary()] | 'undefined', - {msg_ref_delta_gen(A), A}) -> rabbit_types:ok_pid_or_error()). --spec(successfully_recovered_state/1 :: (server()) -> boolean()). --spec(client_init/4 :: (server(), client_ref(), maybe_msg_id_fun(), - maybe_close_fds_fun()) -> client_msstate()). --spec(client_terminate/1 :: (client_msstate()) -> 'ok'). --spec(client_delete_and_terminate/1 :: (client_msstate()) -> 'ok'). --spec(client_ref/1 :: (client_msstate()) -> client_ref()). --spec(close_all_indicated/1 :: - (client_msstate()) -> rabbit_types:ok(client_msstate())). --spec(write/3 :: (rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok'). --spec(write_flow/3 :: (rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok'). --spec(read/2 :: (rabbit_types:msg_id(), client_msstate()) -> - {rabbit_types:ok(msg()) | 'not_found', client_msstate()}). --spec(contains/2 :: (rabbit_types:msg_id(), client_msstate()) -> boolean()). --spec(remove/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok'). - --spec(set_maximum_since_use/2 :: (server(), non_neg_integer()) -> 'ok'). --spec(has_readers/2 :: (non_neg_integer(), gc_state()) -> boolean()). --spec(combine_files/3 :: (non_neg_integer(), non_neg_integer(), gc_state()) -> - deletion_thunk()). --spec(delete_file/2 :: (non_neg_integer(), gc_state()) -> deletion_thunk()). --spec(force_recovery/2 :: (file:filename(), server()) -> 'ok'). --spec(transform_dir/3 :: (file:filename(), server(), - fun ((any()) -> (rabbit_types:ok_or_error2(msg(), any())))) -> 'ok'). - --endif. + {msg_ref_delta_gen(A), A}) -> rabbit_types:ok_pid_or_error(). +-spec successfully_recovered_state(server()) -> boolean(). +-spec client_init(server(), client_ref(), maybe_msg_id_fun(), + maybe_close_fds_fun()) -> client_msstate(). +-spec client_terminate(client_msstate()) -> 'ok'. +-spec client_delete_and_terminate(client_msstate()) -> 'ok'. +-spec client_ref(client_msstate()) -> client_ref(). 
+-spec close_all_indicated + (client_msstate()) -> rabbit_types:ok(client_msstate()). +-spec write(rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok'. +-spec write_flow(rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok'. +-spec read(rabbit_types:msg_id(), client_msstate()) -> + {rabbit_types:ok(msg()) | 'not_found', client_msstate()}. +-spec contains(rabbit_types:msg_id(), client_msstate()) -> boolean(). +-spec remove([rabbit_types:msg_id()], client_msstate()) -> 'ok'. + +-spec set_maximum_since_use(server(), non_neg_integer()) -> 'ok'. +-spec has_readers(non_neg_integer(), gc_state()) -> boolean(). +-spec combine_files(non_neg_integer(), non_neg_integer(), gc_state()) -> + deletion_thunk(). +-spec delete_file(non_neg_integer(), gc_state()) -> deletion_thunk(). +-spec force_recovery(file:filename(), server()) -> 'ok'. +-spec transform_dir(file:filename(), server(), + fun ((any()) -> (rabbit_types:ok_or_error2(msg(), any())))) -> 'ok'. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_msg_store_gc.erl b/src/rabbit_msg_store_gc.erl index b27aaf4c8485..9cfdba8a8d57 100644 --- a/src/rabbit_msg_store_gc.erl +++ b/src/rabbit_msg_store_gc.erl @@ -35,18 +35,14 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start_link/1 :: (rabbit_msg_store:gc_state()) -> - rabbit_types:ok_pid_or_error()). --spec(combine/3 :: (pid(), rabbit_msg_store:file_num(), - rabbit_msg_store:file_num()) -> 'ok'). --spec(delete/2 :: (pid(), rabbit_msg_store:file_num()) -> 'ok'). --spec(no_readers/2 :: (pid(), rabbit_msg_store:file_num()) -> 'ok'). --spec(stop/1 :: (pid()) -> 'ok'). --spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok'). - --endif. +-spec start_link(rabbit_msg_store:gc_state()) -> + rabbit_types:ok_pid_or_error(). +-spec combine(pid(), rabbit_msg_store:file_num(), + rabbit_msg_store:file_num()) -> 'ok'. 
+-spec delete(pid(), rabbit_msg_store:file_num()) -> 'ok'. +-spec no_readers(pid(), rabbit_msg_store:file_num()) -> 'ok'. +-spec stop(pid()) -> 'ok'. +-spec set_maximum_since_use(pid(), non_neg_integer()) -> 'ok'. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_node_monitor.erl b/src/rabbit_node_monitor.erl index 976f4a4b2f23..0322aacfd151 100644 --- a/src/rabbit_node_monitor.erl +++ b/src/rabbit_node_monitor.erl @@ -45,35 +45,31 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). - --spec(running_nodes_filename/0 :: () -> string()). --spec(cluster_status_filename/0 :: () -> string()). --spec(prepare_cluster_status_files/0 :: () -> 'ok'). --spec(write_cluster_status/1 :: (rabbit_mnesia:cluster_status()) -> 'ok'). --spec(read_cluster_status/0 :: () -> rabbit_mnesia:cluster_status()). --spec(update_cluster_status/0 :: () -> 'ok'). --spec(reset_cluster_status/0 :: () -> 'ok'). - --spec(notify_node_up/0 :: () -> 'ok'). --spec(notify_joined_cluster/0 :: () -> 'ok'). --spec(notify_left_cluster/1 :: (node()) -> 'ok'). - --spec(partitions/0 :: () -> [node()]). --spec(partitions/1 :: ([node()]) -> [{node(), [node()]}]). --spec(status/1 :: ([node()]) -> {[{node(), [node()]}], [node()]}). --spec(subscribe/1 :: (pid()) -> 'ok'). --spec(pause_partition_guard/0 :: () -> 'ok' | 'pausing'). - --spec(all_rabbit_nodes_up/0 :: () -> boolean()). --spec(run_outside_applications/2 :: (fun (() -> any()), boolean()) -> pid()). --spec(ping_all/0 :: () -> 'ok'). --spec(alive_nodes/1 :: ([node()]) -> [node()]). --spec(alive_rabbit_nodes/1 :: ([node()]) -> [node()]). - --endif. +-spec start_link() -> rabbit_types:ok_pid_or_error(). + +-spec running_nodes_filename() -> string(). +-spec cluster_status_filename() -> string(). +-spec prepare_cluster_status_files() -> 'ok'. 
+-spec write_cluster_status(rabbit_mnesia:cluster_status()) -> 'ok'. +-spec read_cluster_status() -> rabbit_mnesia:cluster_status(). +-spec update_cluster_status() -> 'ok'. +-spec reset_cluster_status() -> 'ok'. + +-spec notify_node_up() -> 'ok'. +-spec notify_joined_cluster() -> 'ok'. +-spec notify_left_cluster(node()) -> 'ok'. + +-spec partitions() -> [node()]. +-spec partitions([node()]) -> [{node(), [node()]}]. +-spec status([node()]) -> {[{node(), [node()]}], [node()]}. +-spec subscribe(pid()) -> 'ok'. +-spec pause_partition_guard() -> 'ok' | 'pausing'. + +-spec all_rabbit_nodes_up() -> boolean(). +-spec run_outside_applications(fun (() -> any()), boolean()) -> pid(). +-spec ping_all() -> 'ok'. +-spec alive_nodes([node()]) -> [node()]. +-spec alive_rabbit_nodes([node()]) -> [node()]. %%---------------------------------------------------------------------------- %% Start diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl index c7f5d501bf23..4d8966f7e27a 100644 --- a/src/rabbit_plugins.erl +++ b/src/rabbit_plugins.erl @@ -22,19 +22,16 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --type(plugin_name() :: atom()). - --spec(setup/0 :: () -> [plugin_name()]). --spec(active/0 :: () -> [plugin_name()]). --spec(list/1 :: (string()) -> [#plugin{}]). --spec(list/2 :: (string(), boolean()) -> [#plugin{}]). --spec(read_enabled/1 :: (file:filename()) -> [plugin_name()]). --spec(dependencies/3 :: (boolean(), [plugin_name()], [#plugin{}]) -> - [plugin_name()]). --spec(ensure/1 :: (string()) -> {'ok', [atom()], [atom()]} | {error, any()}). --endif. +-type plugin_name() :: atom(). + +-spec setup() -> [plugin_name()]. +-spec active() -> [plugin_name()]. +-spec list(string()) -> [#plugin{}]. +-spec list(string(), boolean()) -> [#plugin{}]. +-spec read_enabled(file:filename()) -> [plugin_name()]. +-spec dependencies(boolean(), [plugin_name()], [#plugin{}]) -> + [plugin_name()]. 
+-spec ensure(string()) -> {'ok', [atom()], [atom()]} | {error, any()}. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_plugins_main.erl b/src/rabbit_plugins_main.erl index e248989a7a90..ff516268c6e5 100644 --- a/src/rabbit_plugins_main.erl +++ b/src/rabbit_plugins_main.erl @@ -32,12 +32,8 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start/0 :: () -> no_return()). --spec(stop/0 :: () -> 'ok'). - --endif. +-spec start() -> no_return(). +-spec stop() -> 'ok'. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 5ecdd75acc42..569a8d6c5a08 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -30,12 +30,8 @@ %% Specs %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start/0 :: () -> no_return()). --spec(stop/0 :: () -> 'ok'). - --endif. +-spec start() -> no_return(). +-spec stop() -> 'ok'. %%---------------------------------------------------------------------------- @@ -129,10 +125,9 @@ dist_port_use_check_ipv6(NodeHost, Port) -> {error, _} -> dist_port_use_check_fail(Port, NodeHost) end. --ifdef(use_specs). --spec(dist_port_use_check_fail/2 :: (non_neg_integer(), string()) -> - no_return()). --endif. +-spec dist_port_use_check_fail(non_neg_integer(), string()) -> + no_return(). + dist_port_use_check_fail(Port, Host) -> {ok, Names} = rabbit_nodes:names(Host), case [N || {N, P} <- Names, P =:= Port] of diff --git a/src/rabbit_prequeue.erl b/src/rabbit_prequeue.erl index af96ea9f6fd2..5b2c24acab14 100644 --- a/src/rabbit_prequeue.erl +++ b/src/rabbit_prequeue.erl @@ -33,16 +33,12 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - -export_type([start_mode/0]). --type(start_mode() :: 'declare' | 'recovery' | 'slave'). 
- --spec(start_link/3 :: (rabbit_types:amqqueue(), start_mode(), pid()) - -> rabbit_types:ok_pid_or_error()). +-type start_mode() :: 'declare' | 'recovery' | 'slave'. --endif. +-spec start_link(rabbit_types:amqqueue(), start_mode(), pid()) + -> rabbit_types:ok_pid_or_error(). %%---------------------------------------------------------------------------- diff --git a/src/rabbit_queue_consumers.erl b/src/rabbit_queue_consumers.erl index 5b5c9b30744a..a8002398e7c5 100644 --- a/src/rabbit_queue_consumers.erl +++ b/src/rabbit_queue_consumers.erl @@ -49,8 +49,6 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - -type time_micros() :: non_neg_integer(). -type ratio() :: float(). -type state() :: #state{consumers ::priority_queue:q(), @@ -94,8 +92,6 @@ state()) -> 'unchanged' | {'unblocked', state()}. -spec utilisation(state()) -> ratio(). --endif. - %%---------------------------------------------------------------------------- new() -> #state{consumers = priority_queue:new(), diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 06b6961edb8e..8b96bbffbda9 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -193,20 +193,18 @@ -rabbit_upgrade({store_msg_size, local, [avoid_zeroes]}). -rabbit_upgrade({store_msg, local, [store_msg_size]}). --ifdef(use_specs). - --type(hdl() :: ('undefined' | any())). --type(segment() :: ('undefined' | +-type hdl() :: ('undefined' | any()). +-type segment() :: ('undefined' | #segment { num :: non_neg_integer(), path :: file:filename(), journal_entries :: array:array(), entries_to_segment :: array:array(), unacked :: non_neg_integer() - })). --type(seq_id() :: integer()). --type(seg_dict() :: {dict:dict(), [segment()]}). --type(on_sync_fun() :: fun ((gb_sets:set()) -> ok)). --type(qistate() :: #qistate { dir :: file:filename(), + }). +-type seq_id() :: integer(). +-type seg_dict() :: {dict:dict(), [segment()]}. 
+-type on_sync_fun() :: fun ((gb_sets:set()) -> ok). +-type qistate() :: #qistate { dir :: file:filename(), segments :: 'undefined' | seg_dict(), journal_handle :: hdl(), dirty_count :: integer(), @@ -217,43 +215,41 @@ unconfirmed_msg :: gb_sets:set(), pre_publish_cache :: list(), delivered_cache :: list() - }). --type(contains_predicate() :: fun ((rabbit_types:msg_id()) -> boolean())). --type(walker(A) :: fun ((A) -> 'finished' | - {rabbit_types:msg_id(), non_neg_integer(), A})). --type(shutdown_terms() :: [term()] | 'non_clean_shutdown'). - --spec(erase/1 :: (rabbit_amqqueue:name()) -> 'ok'). --spec(reset_state/1 :: (qistate()) -> qistate()). --spec(init/3 :: (rabbit_amqqueue:name(), - on_sync_fun(), on_sync_fun()) -> qistate()). --spec(recover/6 :: (rabbit_amqqueue:name(), shutdown_terms(), boolean(), + }. +-type contains_predicate() :: fun ((rabbit_types:msg_id()) -> boolean()). +-type walker(A) :: fun ((A) -> 'finished' | + {rabbit_types:msg_id(), non_neg_integer(), A}). +-type shutdown_terms() :: [term()] | 'non_clean_shutdown'. + +-spec erase(rabbit_amqqueue:name()) -> 'ok'. +-spec reset_state(qistate()) -> qistate(). +-spec init(rabbit_amqqueue:name(), + on_sync_fun(), on_sync_fun()) -> qistate(). +-spec recover(rabbit_amqqueue:name(), shutdown_terms(), boolean(), contains_predicate(), on_sync_fun(), on_sync_fun()) -> {'undefined' | non_neg_integer(), - 'undefined' | non_neg_integer(), qistate()}). --spec(terminate/2 :: ([any()], qistate()) -> qistate()). --spec(delete_and_terminate/1 :: (qistate()) -> qistate()). --spec(publish/6 :: (rabbit_types:msg_id(), seq_id(), + 'undefined' | non_neg_integer(), qistate()}. +-spec terminate([any()], qistate()) -> qistate(). +-spec delete_and_terminate(qistate()) -> qistate(). +-spec publish(rabbit_types:msg_id(), seq_id(), rabbit_types:message_properties(), boolean(), - non_neg_integer(), qistate()) -> qistate()). --spec(deliver/2 :: ([seq_id()], qistate()) -> qistate()). 
--spec(ack/2 :: ([seq_id()], qistate()) -> qistate()). --spec(sync/1 :: (qistate()) -> qistate()). --spec(needs_sync/1 :: (qistate()) -> 'confirms' | 'other' | 'false'). --spec(flush/1 :: (qistate()) -> qistate()). --spec(read/3 :: (seq_id(), seq_id(), qistate()) -> + non_neg_integer(), qistate()) -> qistate(). +-spec deliver([seq_id()], qistate()) -> qistate(). +-spec ack([seq_id()], qistate()) -> qistate(). +-spec sync(qistate()) -> qistate(). +-spec needs_sync(qistate()) -> 'confirms' | 'other' | 'false'. +-spec flush(qistate()) -> qistate(). +-spec read(seq_id(), seq_id(), qistate()) -> {[{rabbit_types:msg_id(), seq_id(), rabbit_types:message_properties(), - boolean(), boolean()}], qistate()}). --spec(next_segment_boundary/1 :: (seq_id()) -> seq_id()). --spec(bounds/1 :: (qistate()) -> - {non_neg_integer(), non_neg_integer(), qistate()}). --spec(start/1 :: ([rabbit_amqqueue:name()]) -> {[[any()]], {walker(A), A}}). - --spec(add_queue_ttl/0 :: () -> 'ok'). + boolean(), boolean()}], qistate()}. +-spec next_segment_boundary(seq_id()) -> seq_id(). +-spec bounds(qistate()) -> + {non_neg_integer(), non_neg_integer(), qistate()}. +-spec start([rabbit_amqqueue:name()]) -> {[[any()]], {walker(A), A}}. --endif. +-spec add_queue_ttl() -> 'ok'. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_recovery_terms.erl b/src/rabbit_recovery_terms.erl index e47ae67bfb39..f6f94eca45b1 100644 --- a/src/rabbit_recovery_terms.erl +++ b/src/rabbit_recovery_terms.erl @@ -34,15 +34,11 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start() -> rabbit_types:ok_or_error(term())). --spec(stop() -> rabbit_types:ok_or_error(term())). --spec(store(file:filename(), term()) -> rabbit_types:ok_or_error(term())). --spec(read(file:filename()) -> rabbit_types:ok_or_error2(term(), not_found)). --spec(clear() -> 'ok'). - --endif. 
% use_specs +-spec start() -> rabbit_types:ok_or_error(term()). +-spec stop() -> rabbit_types:ok_or_error(term()). +-spec store(file:filename(), term()) -> rabbit_types:ok_or_error(term()). +-spec read(file:filename()) -> rabbit_types:ok_or_error2(term(), not_found). +-spec clear() -> 'ok'. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_registry.erl b/src/rabbit_registry.erl index db61c958ec2d..0428c3533fe5 100644 --- a/src/rabbit_registry.erl +++ b/src/rabbit_registry.erl @@ -29,18 +29,14 @@ -define(SERVER, ?MODULE). -define(ETS_NAME, ?MODULE). --ifdef(use_specs). - --spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). --spec(register/3 :: (atom(), binary(), atom()) -> 'ok'). --spec(unregister/2 :: (atom(), binary()) -> 'ok'). --spec(binary_to_type/1 :: - (binary()) -> atom() | rabbit_types:error('not_found')). --spec(lookup_module/2 :: - (atom(), atom()) -> rabbit_types:ok_or_error2(atom(), 'not_found')). --spec(lookup_all/1 :: (atom()) -> [{atom(), atom()}]). - --endif. +-spec start_link() -> rabbit_types:ok_pid_or_error(). +-spec register(atom(), binary(), atom()) -> 'ok'. +-spec unregister(atom(), binary()) -> 'ok'. +-spec binary_to_type + (binary()) -> atom() | rabbit_types:error('not_found'). +-spec lookup_module + (atom(), atom()) -> rabbit_types:ok_or_error2(atom(), 'not_found'). +-spec lookup_all(atom()) -> [{atom(), atom()}]. %%--------------------------------------------------------------------------- diff --git a/src/rabbit_resource_monitor_misc.erl b/src/rabbit_resource_monitor_misc.erl index 80db6e87e118..56faefe536ae 100644 --- a/src/rabbit_resource_monitor_misc.erl +++ b/src/rabbit_resource_monitor_misc.erl @@ -19,12 +19,8 @@ -export([parse_information_unit/1]). --ifdef(use_spec). - --spec(parse_information_unit/1 :: (integer() | string()) -> - {ok, integer()} | {error, parse_error}). - --endif. 
+-spec parse_information_unit(integer() | string()) -> + {ok, integer()} | {error, parse_error}. parse_information_unit(Value) when is_integer(Value) -> {ok, Value}; parse_information_unit(Value) when is_list(Value) -> diff --git a/src/rabbit_restartable_sup.erl b/src/rabbit_restartable_sup.erl index 8517718add54..196025918827 100644 --- a/src/rabbit_restartable_sup.erl +++ b/src/rabbit_restartable_sup.erl @@ -28,12 +28,8 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start_link/3 :: (atom(), rabbit_types:mfargs(), boolean()) -> - rabbit_types:ok_pid_or_error()). - --endif. +-spec start_link(atom(), rabbit_types:mfargs(), boolean()) -> + rabbit_types:ok_pid_or_error(). %%---------------------------------------------------------------------------- diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index 42b67d6681c7..d4390ac4d85a 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -22,21 +22,17 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - -export_type([routing_key/0, match_result/0]). --type(routing_key() :: binary()). --type(match_result() :: [rabbit_types:binding_destination()]). +-type routing_key() :: binary(). +-type match_result() :: [rabbit_types:binding_destination()]. --spec(match_bindings/2 :: (rabbit_types:binding_source(), +-spec match_bindings(rabbit_types:binding_source(), fun ((rabbit_types:binding()) -> boolean())) -> - match_result()). --spec(match_routing_key/2 :: (rabbit_types:binding_source(), + match_result(). +-spec match_routing_key(rabbit_types:binding_source(), [routing_key()] | ['_']) -> - match_result()). - --endif. + match_result(). 
%%---------------------------------------------------------------------------- diff --git a/src/rabbit_runtime_parameters.erl b/src/rabbit_runtime_parameters.erl index ba1a830df1b7..97f78da8ba5c 100644 --- a/src/rabbit_runtime_parameters.erl +++ b/src/rabbit_runtime_parameters.erl @@ -57,38 +57,34 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --type(ok_or_error_string() :: 'ok' | {'error_string', string()}). --type(ok_thunk_or_error_string() :: ok_or_error_string() | fun(() -> 'ok')). - --spec(parse_set/5 :: (rabbit_types:vhost(), binary(), binary(), string(), - rabbit_types:user() | 'none') -> ok_or_error_string()). --spec(set/5 :: (rabbit_types:vhost(), binary(), binary(), term(), - rabbit_types:user() | 'none') -> ok_or_error_string()). --spec(set_any/5 :: (rabbit_types:vhost(), binary(), binary(), term(), - rabbit_types:user() | 'none') -> ok_or_error_string()). --spec(set_global/2 :: (atom(), term()) -> 'ok'). --spec(clear/3 :: (rabbit_types:vhost(), binary(), binary()) - -> ok_thunk_or_error_string()). --spec(clear_any/3 :: (rabbit_types:vhost(), binary(), binary()) - -> ok_thunk_or_error_string()). --spec(list/0 :: () -> [rabbit_types:infos()]). --spec(list/1 :: (rabbit_types:vhost() | '_') -> [rabbit_types:infos()]). --spec(list_component/1 :: (binary()) -> [rabbit_types:infos()]). --spec(list/2 :: (rabbit_types:vhost() | '_', binary() | '_') - -> [rabbit_types:infos()]). --spec(list_formatted/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]). --spec(list_formatted/3 :: (rabbit_types:vhost(), reference(), pid()) -> 'ok'). --spec(lookup/3 :: (rabbit_types:vhost(), binary(), binary()) - -> rabbit_types:infos() | 'not_found'). --spec(value/3 :: (rabbit_types:vhost(), binary(), binary()) -> term()). --spec(value/4 :: (rabbit_types:vhost(), binary(), binary(), term()) -> term()). --spec(value_global/1 :: (atom()) -> term() | 'not_found'). --spec(value_global/2 :: (atom(), term()) -> term()). 
--spec(info_keys/0 :: () -> rabbit_types:info_keys()). - --endif. +-type ok_or_error_string() :: 'ok' | {'error_string', string()}. +-type ok_thunk_or_error_string() :: ok_or_error_string() | fun(() -> 'ok'). + +-spec parse_set(rabbit_types:vhost(), binary(), binary(), string(), + rabbit_types:user() | 'none') -> ok_or_error_string(). +-spec set(rabbit_types:vhost(), binary(), binary(), term(), + rabbit_types:user() | 'none') -> ok_or_error_string(). +-spec set_any(rabbit_types:vhost(), binary(), binary(), term(), + rabbit_types:user() | 'none') -> ok_or_error_string(). +-spec set_global(atom(), term()) -> 'ok'. +-spec clear(rabbit_types:vhost(), binary(), binary()) + -> ok_thunk_or_error_string(). +-spec clear_any(rabbit_types:vhost(), binary(), binary()) + -> ok_thunk_or_error_string(). +-spec list() -> [rabbit_types:infos()]. +-spec list(rabbit_types:vhost() | '_') -> [rabbit_types:infos()]. +-spec list_component(binary()) -> [rabbit_types:infos()]. +-spec list(rabbit_types:vhost() | '_', binary() | '_') + -> [rabbit_types:infos()]. +-spec list_formatted(rabbit_types:vhost()) -> [rabbit_types:infos()]. +-spec list_formatted(rabbit_types:vhost(), reference(), pid()) -> 'ok'. +-spec lookup(rabbit_types:vhost(), binary(), binary()) + -> rabbit_types:infos() | 'not_found'. +-spec value(rabbit_types:vhost(), binary(), binary()) -> term(). +-spec value(rabbit_types:vhost(), binary(), binary(), term()) -> term(). +-spec value_global(atom()) -> term() | 'not_found'. +-spec value_global(atom(), term()) -> term(). +-spec info_keys() -> rabbit_types:info_keys(). %%--------------------------------------------------------------------------- diff --git a/src/rabbit_ssl.erl b/src/rabbit_ssl.erl index 38769e1835e1..ac9fb204d0b1 100644 --- a/src/rabbit_ssl.erl +++ b/src/rabbit_ssl.erl @@ -25,21 +25,17 @@ %%-------------------------------------------------------------------------- --ifdef(use_specs). - -export_type([certificate/0]). --type(certificate() :: binary()). 
- --spec(peer_cert_issuer/1 :: (certificate()) -> string()). --spec(peer_cert_subject/1 :: (certificate()) -> string()). --spec(peer_cert_validity/1 :: (certificate()) -> string()). --spec(peer_cert_subject_items/2 :: - (certificate(), tuple()) -> [string()] | 'not_found'). --spec(peer_cert_auth_name/1 :: - (certificate()) -> binary() | 'not_found' | 'unsafe'). +-type certificate() :: binary(). --endif. +-spec peer_cert_issuer(certificate()) -> string(). +-spec peer_cert_subject(certificate()) -> string(). +-spec peer_cert_validity(certificate()) -> string(). +-spec peer_cert_subject_items + (certificate(), tuple()) -> [string()] | 'not_found'. +-spec peer_cert_auth_name + (certificate()) -> binary() | 'not_found' | 'unsafe'. %%-------------------------------------------------------------------------- %% High-level functions used by reader diff --git a/src/rabbit_sup.erl b/src/rabbit_sup.erl index b33b35c7905a..ad70540e5b26 100644 --- a/src/rabbit_sup.erl +++ b/src/rabbit_sup.erl @@ -33,22 +33,18 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). --spec(start_child/1 :: (atom()) -> 'ok'). --spec(start_child/2 :: (atom(), [any()]) -> 'ok'). --spec(start_child/3 :: (atom(), atom(), [any()]) -> 'ok'). --spec(start_supervisor_child/1 :: (atom()) -> 'ok'). --spec(start_supervisor_child/2 :: (atom(), [any()]) -> 'ok'). --spec(start_supervisor_child/3 :: (atom(), atom(), [any()]) -> 'ok'). --spec(start_restartable_child/1 :: (atom()) -> 'ok'). --spec(start_restartable_child/2 :: (atom(), [any()]) -> 'ok'). --spec(start_delayed_restartable_child/1 :: (atom()) -> 'ok'). --spec(start_delayed_restartable_child/2 :: (atom(), [any()]) -> 'ok'). --spec(stop_child/1 :: (atom()) -> rabbit_types:ok_or_error(any())). - --endif. +-spec start_link() -> rabbit_types:ok_pid_or_error(). +-spec start_child(atom()) -> 'ok'. +-spec start_child(atom(), [any()]) -> 'ok'. 
+-spec start_child(atom(), atom(), [any()]) -> 'ok'. +-spec start_supervisor_child(atom()) -> 'ok'. +-spec start_supervisor_child(atom(), [any()]) -> 'ok'. +-spec start_supervisor_child(atom(), atom(), [any()]) -> 'ok'. +-spec start_restartable_child(atom()) -> 'ok'. +-spec start_restartable_child(atom(), [any()]) -> 'ok'. +-spec start_delayed_restartable_child(atom()) -> 'ok'. +-spec start_delayed_restartable_child(atom(), [any()]) -> 'ok'. +-spec stop_child(atom()) -> rabbit_types:ok_or_error(any()). %%---------------------------------------------------------------------------- diff --git a/src/rabbit_table.erl b/src/rabbit_table.erl index aed49bbe7437..390909696499 100644 --- a/src/rabbit_table.erl +++ b/src/rabbit_table.erl @@ -24,21 +24,17 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(create/0 :: () -> 'ok'). --spec(create_local_copy/1 :: ('disc' | 'ram') -> 'ok'). --spec(wait_for_replicated/0 :: () -> 'ok'). --spec(wait/1 :: ([atom()]) -> 'ok'). --spec(wait_timeout/0 :: () -> non_neg_integer() | infinity). --spec(force_load/0 :: () -> 'ok'). --spec(is_present/0 :: () -> boolean()). --spec(is_empty/0 :: () -> boolean()). --spec(needs_default_data/0 :: () -> boolean()). --spec(check_schema_integrity/0 :: () -> rabbit_types:ok_or_error(any())). --spec(clear_ram_only_tables/0 :: () -> 'ok'). - --endif. +-spec create() -> 'ok'. +-spec create_local_copy('disc' | 'ram') -> 'ok'. +-spec wait_for_replicated() -> 'ok'. +-spec wait([atom()]) -> 'ok'. +-spec wait_timeout() -> non_neg_integer() | infinity. +-spec force_load() -> 'ok'. +-spec is_present() -> boolean(). +-spec is_empty() -> boolean(). +-spec needs_default_data() -> boolean(). +-spec check_schema_integrity() -> rabbit_types:ok_or_error(any()). +-spec clear_ram_only_tables() -> 'ok'. 
%%---------------------------------------------------------------------------- %% Main interface diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index cd4ecf9c3825..4bfd94e1e002 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -26,23 +26,19 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). +-type state() :: rabbit_types:exchange() | 'none'. --type(state() :: rabbit_types:exchange() | 'none'). - --spec(init/1 :: (rabbit_types:vhost()) -> state()). --spec(enabled/1 :: (rabbit_types:vhost()) -> boolean()). --spec(tap_in/6 :: (rabbit_types:basic_message(), [rabbit_amqqueue:name()], +-spec init(rabbit_types:vhost()) -> state(). +-spec enabled(rabbit_types:vhost()) -> boolean(). +-spec tap_in(rabbit_types:basic_message(), [rabbit_amqqueue:name()], binary(), rabbit_channel:channel_number(), - rabbit_types:username(), state()) -> 'ok'). --spec(tap_out/5 :: (rabbit_amqqueue:qmsg(), binary(), + rabbit_types:username(), state()) -> 'ok'. +-spec tap_out(rabbit_amqqueue:qmsg(), binary(), rabbit_channel:channel_number(), - rabbit_types:username(), state()) -> 'ok'). - --spec(start/1 :: (rabbit_types:vhost()) -> 'ok'). --spec(stop/1 :: (rabbit_types:vhost()) -> 'ok'). + rabbit_types:username(), state()) -> 'ok'. --endif. +-spec start(rabbit_types:vhost()) -> 'ok'. +-spec stop(rabbit_types:vhost()) -> 'ok'. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 2f59554d3cb6..f88b7cc73fcb 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -26,14 +26,11 @@ %% ------------------------------------------------------------------- --ifdef(use_specs). - --spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). --spec(maybe_upgrade_local/0 :: () -> 'ok' | - 'version_not_available' | - 'starting_from_scratch'). - --endif. +-spec maybe_upgrade_mnesia() -> 'ok'. 
+-spec maybe_upgrade_local() -> + 'ok' | + 'version_not_available' | + 'starting_from_scratch'. %% ------------------------------------------------------------------- diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index f9ed62b4b261..67c2a84a0ea1 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -55,39 +55,35 @@ %% ------------------------------------------------------------------- --ifdef(use_specs). - --spec(remove_user_scope/0 :: () -> 'ok'). --spec(hash_passwords/0 :: () -> 'ok'). --spec(add_ip_to_listener/0 :: () -> 'ok'). --spec(internal_exchanges/0 :: () -> 'ok'). --spec(user_to_internal_user/0 :: () -> 'ok'). --spec(topic_trie/0 :: () -> 'ok'). --spec(semi_durable_route/0 :: () -> 'ok'). --spec(exchange_event_serial/0 :: () -> 'ok'). --spec(trace_exchanges/0 :: () -> 'ok'). --spec(user_admin_to_tags/0 :: () -> 'ok'). --spec(ha_mirrors/0 :: () -> 'ok'). --spec(gm/0 :: () -> 'ok'). --spec(exchange_scratch/0 :: () -> 'ok'). --spec(mirrored_supervisor/0 :: () -> 'ok'). --spec(topic_trie_node/0 :: () -> 'ok'). --spec(runtime_parameters/0 :: () -> 'ok'). --spec(policy/0 :: () -> 'ok'). --spec(sync_slave_pids/0 :: () -> 'ok'). --spec(no_mirror_nodes/0 :: () -> 'ok'). --spec(gm_pids/0 :: () -> 'ok'). --spec(exchange_decorators/0 :: () -> 'ok'). --spec(policy_apply_to/0 :: () -> 'ok'). --spec(queue_decorators/0 :: () -> 'ok'). --spec(internal_system_x/0 :: () -> 'ok'). --spec(cluster_name/0 :: () -> 'ok'). --spec(down_slave_nodes/0 :: () -> 'ok'). --spec(queue_state/0 :: () -> 'ok'). --spec(recoverable_slaves/0 :: () -> 'ok'). --spec(user_password_hashing/0 :: () -> 'ok'). - --endif. +-spec remove_user_scope() -> 'ok'. +-spec hash_passwords() -> 'ok'. +-spec add_ip_to_listener() -> 'ok'. +-spec internal_exchanges() -> 'ok'. +-spec user_to_internal_user() -> 'ok'. +-spec topic_trie() -> 'ok'. +-spec semi_durable_route() -> 'ok'. +-spec exchange_event_serial() -> 'ok'. 
+-spec trace_exchanges() -> 'ok'. +-spec user_admin_to_tags() -> 'ok'. +-spec ha_mirrors() -> 'ok'. +-spec gm() -> 'ok'. +-spec exchange_scratch() -> 'ok'. +-spec mirrored_supervisor() -> 'ok'. +-spec topic_trie_node() -> 'ok'. +-spec runtime_parameters() -> 'ok'. +-spec policy() -> 'ok'. +-spec sync_slave_pids() -> 'ok'. +-spec no_mirror_nodes() -> 'ok'. +-spec gm_pids() -> 'ok'. +-spec exchange_decorators() -> 'ok'. +-spec policy_apply_to() -> 'ok'. +-spec queue_decorators() -> 'ok'. +-spec internal_system_x() -> 'ok'. +-spec cluster_name() -> 'ok'. +-spec down_slave_nodes() -> 'ok'. +-spec queue_state() -> 'ok'. +-spec recoverable_slaves() -> 'ok'. +-spec user_password_hashing() -> 'ok'. %%-------------------------------------------------------------------- diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 7bf7f2ec54b7..9af88ea53259 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -341,19 +341,17 @@ -rabbit_upgrade({multiple_routing_keys, local, []}). --ifdef(use_specs). +-type seq_id() :: non_neg_integer(). --type(seq_id() :: non_neg_integer()). - --type(rates() :: #rates { in :: float(), +-type rates() :: #rates { in :: float(), out :: float(), ack_in :: float(), ack_out :: float(), - timestamp :: rabbit_types:timestamp()}). + timestamp :: rabbit_types:timestamp()}. --type(delta() :: #delta { start_seq_id :: non_neg_integer(), +-type delta() :: #delta { start_seq_id :: non_neg_integer(), count :: non_neg_integer(), - end_seq_id :: non_neg_integer() }). + end_seq_id :: non_neg_integer() }. %% The compiler (rightfully) complains that ack() and state() are %% unused. For this reason we duplicate a -spec from @@ -361,8 +359,8 @@ %% warnings. The problem here is that we can't parameterise the BQ %% behaviour by these two types as we would like to. We still leave %% these here for documentation purposes. --type(ack() :: seq_id()). --type(state() :: #vqstate { +-type ack() :: seq_id(). 
+-type state() :: #vqstate { q1 :: ?QUEUE:?QUEUE(), q2 :: ?QUEUE:?QUEUE(), delta :: delta(), @@ -404,13 +402,11 @@ disk_write_count :: non_neg_integer(), io_batch_size :: pos_integer(), - mode :: 'default' | 'lazy' }). + mode :: 'default' | 'lazy' }. %% Duplicated from rabbit_backing_queue --spec(ack/2 :: ([ack()], state()) -> {[rabbit_guid:guid()], state()}). - --spec(multiple_routing_keys/0 :: () -> 'ok'). +-spec ack([ack()], state()) -> {[rabbit_guid:guid()], state()}. --endif. +-spec multiple_routing_keys() -> 'ok'. -define(BLANK_DELTA, #delta { start_seq_id = undefined, count = 0, diff --git a/src/rabbit_version.erl b/src/rabbit_version.erl index 8167c8622ad1..a27f0aca0052 100644 --- a/src/rabbit_version.erl +++ b/src/rabbit_version.erl @@ -23,33 +23,32 @@ version_error/3]). %% ------------------------------------------------------------------- --ifdef(use_specs). -export_type([scope/0, step/0]). --type(scope() :: atom()). --type(scope_version() :: [atom()]). --type(step() :: {atom(), atom()}). - --type(version() :: [atom()]). - --spec(recorded/0 :: () -> rabbit_types:ok_or_error2(version(), any())). --spec(matches/2 :: ([A], [A]) -> boolean()). --spec(desired/0 :: () -> version()). --spec(desired_for_scope/1 :: (scope()) -> scope_version()). --spec(record_desired/0 :: () -> 'ok'). --spec(record_desired_for_scope/1 :: - (scope()) -> rabbit_types:ok_or_error(any())). --spec(upgrades_required/1 :: - (scope()) -> rabbit_types:ok_or_error2([step()], any())). --spec(check_version_consistency/3 :: - (string(), string(), string()) -> rabbit_types:ok_or_error(any())). --spec(check_version_consistency/4 :: +-type scope() :: atom(). +-type scope_version() :: [atom()]. +-type step() :: {atom(), atom()}. + +-type version() :: [atom()]. + +-spec recorded() -> rabbit_types:ok_or_error2(version(), any()). +-spec matches([A], [A]) -> boolean(). +-spec desired() -> version(). +-spec desired_for_scope(scope()) -> scope_version(). +-spec record_desired() -> 'ok'. 
+-spec record_desired_for_scope + (scope()) -> rabbit_types:ok_or_error(any()). +-spec upgrades_required + (scope()) -> rabbit_types:ok_or_error2([step()], any()). +-spec check_version_consistency + (string(), string(), string()) -> rabbit_types:ok_or_error(any()). +-spec check_version_consistency (string(), string(), string(), string()) -> - rabbit_types:ok_or_error(any())). --spec(check_otp_consistency/1 :: - (string()) -> rabbit_types:ok_or_error(any())). --endif. + rabbit_types:ok_or_error(any()). +-spec check_otp_consistency + (string()) -> rabbit_types:ok_or_error(any()). + %% ------------------------------------------------------------------- -define(VERSION_FILENAME, "schema_version"). diff --git a/src/rabbit_vhost.erl b/src/rabbit_vhost.erl index f362ef930f1a..df2f8423b48a 100644 --- a/src/rabbit_vhost.erl +++ b/src/rabbit_vhost.erl @@ -23,24 +23,20 @@ -export([add/1, delete/1, exists/1, list/0, with/2, assert/1]). -export([info/1, info/2, info_all/0, info_all/1, info_all/2, info_all/3]). --ifdef(use_specs). - --spec(add/1 :: (rabbit_types:vhost()) -> 'ok'). --spec(delete/1 :: (rabbit_types:vhost()) -> 'ok'). --spec(exists/1 :: (rabbit_types:vhost()) -> boolean()). --spec(list/0 :: () -> [rabbit_types:vhost()]). --spec(with/2 :: (rabbit_types:vhost(), rabbit_misc:thunk(A)) -> A). --spec(assert/1 :: (rabbit_types:vhost()) -> 'ok'). - --spec(info/1 :: (rabbit_types:vhost()) -> rabbit_types:infos()). --spec(info/2 :: (rabbit_types:vhost(), rabbit_types:info_keys()) - -> rabbit_types:infos()). --spec(info_all/0 :: () -> [rabbit_types:infos()]). --spec(info_all/1 :: (rabbit_types:info_keys()) -> [rabbit_types:infos()]). --spec(info_all/3 :: (rabbit_types:info_keys(), reference(), pid()) -> - 'ok'). - --endif. +-spec add(rabbit_types:vhost()) -> 'ok'. +-spec delete(rabbit_types:vhost()) -> 'ok'. +-spec exists(rabbit_types:vhost()) -> boolean(). +-spec list() -> [rabbit_types:vhost()]. +-spec with(rabbit_types:vhost(), rabbit_misc:thunk(A)) -> A. 
+-spec assert(rabbit_types:vhost()) -> 'ok'. + +-spec info(rabbit_types:vhost()) -> rabbit_types:infos(). +-spec info(rabbit_types:vhost(), rabbit_types:info_keys()) + -> rabbit_types:infos(). +-spec info_all() -> [rabbit_types:infos()]. +-spec info_all(rabbit_types:info_keys()) -> [rabbit_types:infos()]. +-spec info_all(rabbit_types:info_keys(), reference(), pid()) -> + 'ok'. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_vm.erl b/src/rabbit_vm.erl index d67331aec7e2..9c8732bb6b62 100644 --- a/src/rabbit_vm.erl +++ b/src/rabbit_vm.erl @@ -23,15 +23,11 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(memory/0 :: () -> rabbit_types:infos()). --spec(binary/0 :: () -> rabbit_types:infos()). --spec(ets_tables_memory/1 :: (Owners) -> rabbit_types:infos() +-spec memory() -> rabbit_types:infos(). +-spec binary() -> rabbit_types:infos(). +-spec ets_tables_memory(Owners) -> rabbit_types:infos() when Owners :: all | OwnerProcessName | [OwnerProcessName], - OwnerProcessName :: atom()). - --endif. + OwnerProcessName :: atom(). %%---------------------------------------------------------------------------- @@ -230,21 +226,19 @@ conn_type(PDict) -> %% NB: this code is non-rabbit specific. --ifdef(use_specs). --type(process() :: pid() | atom()). --type(info_key() :: atom()). --type(info_value() :: any()). --type(info_item() :: {info_key(), info_value()}). --type(accumulate() :: fun ((info_key(), info_value(), info_value()) -> - info_value())). --type(distinguisher() :: fun (([{term(), term()}]) -> atom())). --type(distinguishers() :: [{info_key(), distinguisher()}]). --spec(sum_processes/3 :: ([process()], distinguishers(), [info_key()]) -> - {[{process(), [info_item()]}], [info_item()]}). --spec(sum_processes/4 :: ([process()], accumulate(), distinguishers(), +-type process() :: pid() | atom(). +-type info_key() :: atom(). +-type info_value() :: any(). 
+-type info_item() :: {info_key(), info_value()}. +-type accumulate() :: fun ((info_key(), info_value(), info_value()) -> + info_value()). +-type distinguisher() :: fun (([{term(), term()}]) -> atom()). +-type distinguishers() :: [{info_key(), distinguisher()}]. +-spec sum_processes([process()], distinguishers(), [info_key()]) -> + {[{process(), [info_item()]}], [info_item()]}. +-spec sum_processes([process()], accumulate(), distinguishers(), [info_item()]) -> - {[{process(), [info_item()]}], [info_item()]}). --endif. + {[{process(), [info_item()]}], [info_item()]}. sum_processes(Names, Distinguishers, Items) -> sum_processes(Names, fun (_, X, Y) -> X + Y end, Distinguishers, diff --git a/src/supervised_lifecycle.erl b/src/supervised_lifecycle.erl index 981956a07109..5b0f56dc2674 100644 --- a/src/supervised_lifecycle.erl +++ b/src/supervised_lifecycle.erl @@ -36,12 +36,8 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start_link/3 :: (atom(), rabbit_types:mfargs(), rabbit_types:mfargs()) -> - rabbit_types:ok_pid_or_error()). - --endif. +-spec start_link(atom(), rabbit_types:mfargs(), rabbit_types:mfargs()) -> + rabbit_types:ok_pid_or_error(). %%---------------------------------------------------------------------------- diff --git a/src/tcp_listener.erl b/src/tcp_listener.erl index 36b04a459317..5f15592455b8 100644 --- a/src/tcp_listener.erl +++ b/src/tcp_listener.erl @@ -57,16 +57,12 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). +-type mfargs() :: {atom(), atom(), [any()]}. --type(mfargs() :: {atom(), atom(), [any()]}). - --spec(start_link/5 :: +-spec start_link (inet:ip_address(), inet:port_number(), mfargs(), mfargs(), string()) -> - rabbit_types:ok_pid_or_error()). - --endif. + rabbit_types:ok_pid_or_error(). 
%%-------------------------------------------------------------------- diff --git a/src/tcp_listener_sup.erl b/src/tcp_listener_sup.erl index 98a7c890d92c..5ef652ad60c1 100644 --- a/src/tcp_listener_sup.erl +++ b/src/tcp_listener_sup.erl @@ -31,16 +31,12 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). +-type mfargs() :: {atom(), atom(), [any()]}. --type(mfargs() :: {atom(), atom(), [any()]}). - --spec(start_link/10 :: +-spec start_link (inet:ip_address(), inet:port_number(), module(), [gen_tcp:listen_option()], module(), any(), mfargs(), mfargs(), integer(), string()) -> - rabbit_types:ok_pid_or_error()). - --endif. + rabbit_types:ok_pid_or_error(). %%---------------------------------------------------------------------------- diff --git a/src/vm_memory_monitor.erl b/src/vm_memory_monitor.erl index 68926962b045..6b043685bde6 100644 --- a/src/vm_memory_monitor.erl +++ b/src/vm_memory_monitor.erl @@ -62,21 +62,17 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --type(vm_memory_high_watermark() :: (float() | {'absolute', integer() | string()})). --spec(start_link/1 :: (float()) -> rabbit_types:ok_pid_or_error()). --spec(start_link/3 :: (float(), fun ((any()) -> 'ok'), - fun ((any()) -> 'ok')) -> rabbit_types:ok_pid_or_error()). --spec(get_total_memory/0 :: () -> (non_neg_integer() | 'unknown')). --spec(get_vm_limit/0 :: () -> non_neg_integer()). --spec(get_check_interval/0 :: () -> non_neg_integer()). --spec(set_check_interval/1 :: (non_neg_integer()) -> 'ok'). --spec(get_vm_memory_high_watermark/0 :: () -> vm_memory_high_watermark()). --spec(set_vm_memory_high_watermark/1 :: (vm_memory_high_watermark()) -> 'ok'). --spec(get_memory_limit/0 :: () -> non_neg_integer()). - --endif. +-type vm_memory_high_watermark() :: (float() | {'absolute', integer() | string()}). +-spec start_link(float()) -> rabbit_types:ok_pid_or_error(). 
+-spec start_link(float(), fun ((any()) -> 'ok'), + fun ((any()) -> 'ok')) -> rabbit_types:ok_pid_or_error(). +-spec get_total_memory() -> (non_neg_integer() | 'unknown'). +-spec get_vm_limit() -> non_neg_integer(). +-spec get_check_interval() -> non_neg_integer(). +-spec set_check_interval(non_neg_integer()) -> 'ok'. +-spec get_vm_memory_high_watermark() -> vm_memory_high_watermark(). +-spec set_vm_memory_high_watermark(vm_memory_high_watermark()) -> 'ok'. +-spec get_memory_limit() -> non_neg_integer(). %%---------------------------------------------------------------------------- %% Public API diff --git a/src/worker_pool.erl b/src/worker_pool.erl index 6bacd43d273d..c0be486f5fba 100644 --- a/src/worker_pool.erl +++ b/src/worker_pool.erl @@ -61,20 +61,16 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --type(mfargs() :: {atom(), atom(), [any()]}). - --spec(start_link/1 :: (atom()) -> {'ok', pid()} | {'error', any()}). --spec(submit/1 :: (fun (() -> A) | mfargs()) -> A). --spec(submit/2 :: (fun (() -> A) | mfargs(), 'reuse' | 'single') -> A). --spec(submit/3 :: (atom(), fun (() -> A) | mfargs(), 'reuse' | 'single') -> A). --spec(submit_async/1 :: (fun (() -> any()) | mfargs()) -> 'ok'). --spec(ready/2 :: (atom(), pid()) -> 'ok'). --spec(idle/2 :: (atom(), pid()) -> 'ok'). --spec(default_pool/0 :: () -> atom()). - --endif. +-type mfargs() :: {atom(), atom(), [any()]}. + +-spec start_link(atom()) -> {'ok', pid()} | {'error', any()}. +-spec submit(fun (() -> A) | mfargs()) -> A. +-spec submit(fun (() -> A) | mfargs(), 'reuse' | 'single') -> A. +-spec submit(atom(), fun (() -> A) | mfargs(), 'reuse' | 'single') -> A. +-spec submit_async(fun (() -> any()) | mfargs()) -> 'ok'. +-spec ready(atom(), pid()) -> 'ok'. +-spec idle(atom(), pid()) -> 'ok'. +-spec default_pool() -> atom(). 
%%---------------------------------------------------------------------------- diff --git a/src/worker_pool_sup.erl b/src/worker_pool_sup.erl index 3b2c3476c254..f4ed4d70c242 100644 --- a/src/worker_pool_sup.erl +++ b/src/worker_pool_sup.erl @@ -24,14 +24,10 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). --spec(start_link/1 :: (non_neg_integer()) -> rabbit_types:ok_pid_or_error()). --spec(start_link/2 :: (non_neg_integer(), atom()) - -> rabbit_types:ok_pid_or_error()). - --endif. +-spec start_link() -> rabbit_types:ok_pid_or_error(). +-spec start_link(non_neg_integer()) -> rabbit_types:ok_pid_or_error(). +-spec start_link(non_neg_integer(), atom()) + -> rabbit_types:ok_pid_or_error(). %%---------------------------------------------------------------------------- diff --git a/src/worker_pool_worker.erl b/src/worker_pool_worker.erl index 259af5e4a2f7..bd07f0d782ad 100644 --- a/src/worker_pool_worker.erl +++ b/src/worker_pool_worker.erl @@ -33,18 +33,14 @@ %%---------------------------------------------------------------------------- --ifdef(use_specs). - --type(mfargs() :: {atom(), atom(), [any()]}). - --spec(start_link/1 :: (atom) -> {'ok', pid()} | {'error', any()}). --spec(next_job_from/2 :: (pid(), pid()) -> 'ok'). --spec(submit/3 :: (pid(), fun (() -> A) | mfargs(), 'reuse' | 'single') -> A). --spec(submit_async/2 :: (pid(), fun (() -> any()) | mfargs()) -> 'ok'). --spec(run/1 :: (fun (() -> A)) -> A; (mfargs()) -> any()). --spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok'). - --endif. +-type mfargs() :: {atom(), atom(), [any()]}. + +-spec start_link(atom) -> {'ok', pid()} | {'error', any()}. +-spec next_job_from(pid(), pid()) -> 'ok'. +-spec submit(pid(), fun (() -> A) | mfargs(), 'reuse' | 'single') -> A. +-spec submit_async(pid(), fun (() -> any()) | mfargs()) -> 'ok'. +-spec run(fun (() -> A)) -> A; (mfargs()) -> any(). 
+-spec set_maximum_since_use(pid(), non_neg_integer()) -> 'ok'. %%---------------------------------------------------------------------------- diff --git a/test/channel_operation_timeout_test_queue.erl b/test/channel_operation_timeout_test_queue.erl index 55cd5f42fa55..0e25dfb99163 100644 --- a/test/channel_operation_timeout_test_queue.erl +++ b/test/channel_operation_timeout_test_queue.erl @@ -123,19 +123,17 @@ -rabbit_upgrade({multiple_routing_keys, local, []}). --ifdef(use_specs). +-type seq_id() :: non_neg_integer(). --type(seq_id() :: non_neg_integer()). - --type(rates() :: #rates { in :: float(), +-type rates() :: #rates { in :: float(), out :: float(), ack_in :: float(), ack_out :: float(), - timestamp :: rabbit_types:timestamp()}). + timestamp :: rabbit_types:timestamp()}. --type(delta() :: #delta { start_seq_id :: non_neg_integer(), +-type delta() :: #delta { start_seq_id :: non_neg_integer(), count :: non_neg_integer(), - end_seq_id :: non_neg_integer() }). + end_seq_id :: non_neg_integer() }. %% The compiler (rightfully) complains that ack() and state() are %% unused. For this reason we duplicate a -spec from @@ -143,8 +141,8 @@ %% warnings. The problem here is that we can't parameterise the BQ %% behaviour by these two types as we would like to. We still leave %% these here for documentation purposes. --type(ack() :: seq_id()). --type(state() :: #vqstate { +-type ack() :: seq_id(). +-type state() :: #vqstate { q1 :: ?QUEUE:?QUEUE(), q2 :: ?QUEUE:?QUEUE(), delta :: delta(), @@ -186,13 +184,11 @@ disk_write_count :: non_neg_integer(), io_batch_size :: pos_integer(), - mode :: 'default' | 'lazy' }). + mode :: 'default' | 'lazy' }. %% Duplicated from rabbit_backing_queue --spec(ack/2 :: ([ack()], state()) -> {[rabbit_guid:guid()], state()}). - --spec(multiple_routing_keys/0 :: () -> 'ok'). +-spec ack([ack()], state()) -> {[rabbit_guid:guid()], state()}. --endif. +-spec multiple_routing_keys() -> 'ok'. 
-define(BLANK_DELTA, #delta { start_seq_id = undefined, count = 0, From 2735990fcc5907613c053d16ce86686662c39dc3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Tue, 28 Jun 2016 15:56:17 +0200 Subject: [PATCH 172/174] Use the new `rand_compat` module to transition from `random` to `rand` References #860. [#122335241] --- src/file_handle_cache.erl | 2 +- src/gm.erl | 5 +---- src/rabbit_cli.erl | 2 +- src/rabbit_limiter.erl | 2 +- src/rabbit_mirror_queue_mode_exactly.erl | 5 +---- src/rabbit_password.erl | 5 +---- test/priority_queue_SUITE.erl | 2 +- test/unit_inbroker_SUITE.erl | 2 +- 8 files changed, 8 insertions(+), 17 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index ec2d82186a8c..2b69f39b04ce 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -1477,7 +1477,7 @@ notify_age(CStates, AverageAge) -> notify_age0(Clients, CStates, Required) -> case [CState || CState <- CStates, CState#cstate.callback =/= undefined] of [] -> ok; - Notifications -> S = random:uniform(length(Notifications)), + Notifications -> S = rand_compat:uniform(length(Notifications)), {L1, L2} = lists:split(S, Notifications), notify(Clients, Required, L2 ++ L1) end. 
diff --git a/src/gm.erl b/src/gm.erl index dc47b93a5b6c..176e14537f2d 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -536,9 +536,6 @@ forget_group(GroupName) -> init([GroupName, Module, Args, TxnFun]) -> put(process_name, {?MODULE, GroupName}), - _ = random:seed(erlang:phash2([node()]), - time_compat:monotonic_time(), - time_compat:unique_integer()), Self = make_member(GroupName), gen_server2:cast(self(), join), {ok, #state { self = Self, @@ -1069,7 +1066,7 @@ join_group(Self, GroupName, #gm_group { members = Members } = Group, TxnFun) -> prune_or_create_group(Self, GroupName, TxnFun), TxnFun); Alive -> - Left = lists:nth(random:uniform(length(Alive)), Alive), + Left = lists:nth(rand_compat:uniform(length(Alive)), Alive), Handler = fun () -> join_group( diff --git a/src/rabbit_cli.erl b/src/rabbit_cli.erl index d1229c32f916..6b3548221747 100644 --- a/src/rabbit_cli.erl +++ b/src/rabbit_cli.erl @@ -147,7 +147,7 @@ main(ParseFun, DoFun, UsageMod) -> start_distribution_anon(0, LastError) -> {error, LastError}; start_distribution_anon(TriesLeft, _) -> - NameCandidate = list_to_atom(rabbit_misc:format("rabbitmq-cli-~2..0b", [rabbit_misc:random(100)])), + NameCandidate = list_to_atom(rabbit_misc:format("rabbitmq-cli-~2..0b", [rand_compat:uniform(100)])), case net_kernel:start([NameCandidate, name_type()]) of {ok, _} = Result -> Result; diff --git a/src/rabbit_limiter.erl b/src/rabbit_limiter.erl index 94620416d69a..203e309b0295 100644 --- a/src/rabbit_limiter.erl +++ b/src/rabbit_limiter.erl @@ -432,7 +432,7 @@ notify_queues(State = #lim{ch_pid = ChPid, queues = Queues}) -> %% We randomly vary the position of queues in the list, %% thus ensuring that each queue has an equal chance of %% being notified first. 
- {L1, L2} = lists:split(random:uniform(L), QList), + {L1, L2} = lists:split(rand_compat:uniform(L), QList), [[ok = rabbit_amqqueue:resume(Q, ChPid) || Q <- L3] || L3 <- [L2, L1]], ok diff --git a/src/rabbit_mirror_queue_mode_exactly.erl b/src/rabbit_mirror_queue_mode_exactly.erl index 4721ad613630..593f0a4138bb 100644 --- a/src/rabbit_mirror_queue_mode_exactly.erl +++ b/src/rabbit_mirror_queue_mode_exactly.erl @@ -45,10 +45,7 @@ suggested_queue_nodes(Count, MNode, SNodes, _SSNodes, Poss) -> end}. shuffle(L) -> - random:seed(erlang:phash2([node()]), - time_compat:monotonic_time(), - time_compat:unique_integer()), - {_, L1} = lists:unzip(lists:keysort(1, [{random:uniform(), N} || N <- L])), + {_, L1} = lists:unzip(lists:keysort(1, [{rand_compat:uniform(), N} || N <- L])), L1. validate_policy(N) when is_integer(N) andalso N > 0 -> diff --git a/src/rabbit_password.erl b/src/rabbit_password.erl index d5b0945de977..b7987df1d806 100644 --- a/src/rabbit_password.erl +++ b/src/rabbit_password.erl @@ -35,10 +35,7 @@ hash(HashingMod, Cleartext) -> <>. generate_salt() -> - random:seed(erlang:phash2([node()]), - time_compat:monotonic_time(), - time_compat:unique_integer()), - Salt = random:uniform(16#ffffffff), + Salt = rand_compat:uniform(16#ffffffff), <>. salted_hash(Salt, Cleartext) -> diff --git a/test/priority_queue_SUITE.erl b/test/priority_queue_SUITE.erl index 46fafd89f728..05853ebc1f7f 100644 --- a/test/priority_queue_SUITE.erl +++ b/test/priority_queue_SUITE.erl @@ -546,7 +546,7 @@ publish_payload(Ch, Q, PPds) -> amqp_channel:wait_for_confirms(Ch). publish_many(_Ch, _Q, 0) -> ok; -publish_many( Ch, Q, N) -> publish1(Ch, Q, random:uniform(5)), +publish_many( Ch, Q, N) -> publish1(Ch, Q, rand_compat:uniform(5)), publish_many(Ch, Q, N - 1). 
publish1(Ch, Q, P) -> diff --git a/test/unit_inbroker_SUITE.erl b/test/unit_inbroker_SUITE.erl index e20f63ba8bb2..e9ecbf5444dd 100644 --- a/test/unit_inbroker_SUITE.erl +++ b/test/unit_inbroker_SUITE.erl @@ -1316,7 +1316,7 @@ maybe_switch_queue_mode(VQ) -> random_queue_mode() -> Modes = [lazy, default], - lists:nth(random:uniform(length(Modes)), Modes). + lists:nth(rand_compat:uniform(length(Modes)), Modes). pub_res({_, VQS}) -> VQS; From f7c54b9a7caaa9c3655fe8cfeccec8deaa28ca5c Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Tue, 28 Jun 2016 22:58:25 +0300 Subject: [PATCH 173/174] Ignore debug/* --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index f62802a7ec1d..7582deb64e8a 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ *.coverdata /.erlang.mk/ /cover/ +/debug/ /deps/ /doc/ /ebin/ From f0f43f8002678451bdb1e1d7ccdd5e7723949625 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Wed, 29 Jun 2016 12:01:23 +0200 Subject: [PATCH 174/174] Use `rand` directly in master because we require Erlang 18.3 References rabbitmq/rabbitmq-server#860. [#122335241] --- src/file_handle_cache.erl | 2 +- src/gm.erl | 2 +- src/rabbit_cli.erl | 2 +- src/rabbit_limiter.erl | 2 +- src/rabbit_mirror_queue_mode_exactly.erl | 2 +- src/rabbit_password.erl | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index 0fa61092e7bd..f2c28c4e4360 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -1477,7 +1477,7 @@ notify_age(CStates, AverageAge) -> notify_age0(Clients, CStates, Required) -> case [CState || CState <- CStates, CState#cstate.callback =/= undefined] of [] -> ok; - Notifications -> S = rand_compat:uniform(length(Notifications)), + Notifications -> S = rand:uniform(length(Notifications)), {L1, L2} = lists:split(S, Notifications), notify(Clients, Required, L2 ++ L1) end. 
diff --git a/src/gm.erl b/src/gm.erl index 176e14537f2d..3554f01d56ff 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -1066,7 +1066,7 @@ join_group(Self, GroupName, #gm_group { members = Members } = Group, TxnFun) -> prune_or_create_group(Self, GroupName, TxnFun), TxnFun); Alive -> - Left = lists:nth(rand_compat:uniform(length(Alive)), Alive), + Left = lists:nth(rand:uniform(length(Alive)), Alive), Handler = fun () -> join_group( diff --git a/src/rabbit_cli.erl b/src/rabbit_cli.erl index 6b3548221747..d324625d1b42 100644 --- a/src/rabbit_cli.erl +++ b/src/rabbit_cli.erl @@ -147,7 +147,7 @@ main(ParseFun, DoFun, UsageMod) -> start_distribution_anon(0, LastError) -> {error, LastError}; start_distribution_anon(TriesLeft, _) -> - NameCandidate = list_to_atom(rabbit_misc:format("rabbitmq-cli-~2..0b", [rand_compat:uniform(100)])), + NameCandidate = list_to_atom(rabbit_misc:format("rabbitmq-cli-~2..0b", [rand:uniform(100)])), case net_kernel:start([NameCandidate, name_type()]) of {ok, _} = Result -> Result; diff --git a/src/rabbit_limiter.erl b/src/rabbit_limiter.erl index 203e309b0295..74e802b3a036 100644 --- a/src/rabbit_limiter.erl +++ b/src/rabbit_limiter.erl @@ -432,7 +432,7 @@ notify_queues(State = #lim{ch_pid = ChPid, queues = Queues}) -> %% We randomly vary the position of queues in the list, %% thus ensuring that each queue has an equal chance of %% being notified first. - {L1, L2} = lists:split(rand_compat:uniform(L), QList), + {L1, L2} = lists:split(rand:uniform(L), QList), [[ok = rabbit_amqqueue:resume(Q, ChPid) || Q <- L3] || L3 <- [L2, L1]], ok diff --git a/src/rabbit_mirror_queue_mode_exactly.erl b/src/rabbit_mirror_queue_mode_exactly.erl index 593f0a4138bb..c2ffa39f5958 100644 --- a/src/rabbit_mirror_queue_mode_exactly.erl +++ b/src/rabbit_mirror_queue_mode_exactly.erl @@ -45,7 +45,7 @@ suggested_queue_nodes(Count, MNode, SNodes, _SSNodes, Poss) -> end}. 
shuffle(L) -> - {_, L1} = lists:unzip(lists:keysort(1, [{rand_compat:uniform(), N} || N <- L])), + {_, L1} = lists:unzip(lists:keysort(1, [{rand:uniform(), N} || N <- L])), L1. validate_policy(N) when is_integer(N) andalso N > 0 -> diff --git a/src/rabbit_password.erl b/src/rabbit_password.erl index b7987df1d806..0538445ab4b2 100644 --- a/src/rabbit_password.erl +++ b/src/rabbit_password.erl @@ -35,7 +35,7 @@ hash(HashingMod, Cleartext) -> <>. generate_salt() -> - Salt = rand_compat:uniform(16#ffffffff), + Salt = rand:uniform(16#ffffffff), <>. salted_hash(Salt, Cleartext) ->