From 9816787a185c0b28a4b69207cfcd4676b71ef857 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 22 Feb 2022 22:08:54 +0100 Subject: [PATCH 001/161] Add ejabberd_sm_cets Use cets for stream resumption backend Print cets tables in mongoosectl mnesia info Enable cets on CI Add test for mongooseimctl mnesia info Test ejabberd_sm_cets in ejabberd_sm_SUITE Rename to cets --- .circleci/template.yml | 11 +- big_tests/test.config | 7 + big_tests/tests/ct_helper.erl | 12 +- big_tests/tests/mongoose_helper.erl | 9 +- big_tests/tests/mongooseimctl_SUITE.erl | 16 +- big_tests/tests/sm_SUITE.erl | 17 ++- rebar.config | 13 +- rebar.lock | 4 + rel/files/cets_disco.txt | 3 + src/ejabberd_ctl.erl | 14 ++ src/ejabberd_sm.erl | 2 +- src/ejabberd_sm_cets.erl | 139 +++++++++++++++++ src/ejabberd_sup.erl | 10 +- src/mongooseim.app.src | 3 +- .../mod_stream_management_cets.erl | 141 ++++++++++++++++++ test/ejabberd_sm_SUITE.erl | 29 +++- tools/build-releases.sh | 1 + tools/test_runner/apply_templates.erl | 15 +- 18 files changed, 413 insertions(+), 33 deletions(-) create mode 100644 rel/files/cets_disco.txt create mode 100644 src/ejabberd_sm_cets.erl create mode 100644 src/stream_management/mod_stream_management_cets.erl diff --git a/.circleci/template.yml b/.circleci/template.yml index dda8008fa0d..39537a18724 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -591,7 +591,7 @@ jobs: preset: type: enum enum: [internal_mnesia, mysql_redis, odbc_mssql_mnesia, ldap_mnesia, - elasticsearch_and_cassandra_mnesia, pgsql_mnesia] + elasticsearch_and_cassandra_mnesia, pgsql_mnesia, internal_cets] description: Preset to run default: internal_mnesia db: @@ -825,6 +825,15 @@ workflows: requires: - otp_25_docker filters: *all_tags + - big_tests_in_docker: + name: internal_cets_25 + executor: otp_25_redis + context: mongooseim-org + preset: internal_cets + db: "mnesia cets" + requires: + - otp_25_docker + filters: *all_tags - big_tests_in_docker: name: mysql_redis_25 executor: 
otp_25_mysql_redis diff --git a/big_tests/test.config b/big_tests/test.config index 1b15c308817..377df5020e4 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -236,6 +236,13 @@ [{dbs, [redis, minio]}, {outgoing_pools, "[outgoing_pools.redis.global_distrib] scope = \"global\" + workers = 10"}]}, + {internal_cets, + [{dbs, [redis]}, + {sm_backend, "\"cets\""}, + {stream_management_backend, cets}, + {outgoing_pools, "[outgoing_pools.redis.global_distrib] + scope = \"global\" workers = 10"}]}, {pgsql_mnesia, [{dbs, [redis, pgsql]}, diff --git a/big_tests/tests/ct_helper.erl b/big_tests/tests/ct_helper.erl index dd5a0f01973..a7bd86833e9 100644 --- a/big_tests/tests/ct_helper.erl +++ b/big_tests/tests/ct_helper.erl @@ -4,7 +4,8 @@ repeat_all_until_all_ok/2, repeat_all_until_any_fail/1, repeat_all_until_any_fail/2, - groups_to_all/1]). + groups_to_all/1, + get_preset_var/3]). -type group_name() :: atom(). @@ -114,3 +115,12 @@ is_ct_started() -> groups_to_all(Groups) -> [{group, Name} || {Name, _Opts, _Cases} <- Groups]. + +get_preset_var(Config, Opt, Def) -> + case proplists:get_value(preset, Config, undefined) of + Preset -> + PresetAtom = list_to_existing_atom(Preset), + ct:get_config({presets, toml, PresetAtom, Opt}, Def); + _ -> + Def + end. diff --git a/big_tests/tests/mongoose_helper.erl b/big_tests/tests/mongoose_helper.erl index a2fedb3c284..e60c6037545 100644 --- a/big_tests/tests/mongoose_helper.erl +++ b/big_tests/tests/mongoose_helper.erl @@ -503,13 +503,8 @@ restart_listener(Spec, Listener) -> rpc(Spec, mongoose_listener, start_listener, [Listener]). should_minio_be_running(Config) -> - case proplists:get_value(preset, Config, undefined) of - undefined -> false; - Preset -> - PresetAtom = list_to_existing_atom(Preset), - DBs = ct:get_config({presets, toml, PresetAtom, dbs}, []), - lists:member(minio, DBs) - end. + DBs = ct_helper:get_preset_var(Config, dbs, []), + lists:member(minio, DBs). 
%% It is useful to debug dynamic IQ handler registration print_debug_info_for_module(Module) -> diff --git a/big_tests/tests/mongooseimctl_SUITE.erl b/big_tests/tests/mongooseimctl_SUITE.erl index b4a57679825..e59671e92d0 100644 --- a/big_tests/tests/mongooseimctl_SUITE.erl +++ b/big_tests/tests/mongooseimctl_SUITE.erl @@ -136,7 +136,8 @@ basic() -> dump_table, get_loglevel, remove_old_messages_test, - remove_expired_messages_test]. + remove_expired_messages_test, + cets_tables_are_in_mnesia_info]. accounts() -> [change_password, check_password_hash, check_password, check_account, ban_account, num_active_users, delete_old_users, @@ -311,6 +312,13 @@ end_per_group(_GroupName, Config) -> get_registered_users() -> rpc(mim(), ejabberd_auth, get_vh_registered_users, [domain()]). +init_per_testcase(CaseName = cets_tables_are_in_mnesia_info, Config) -> + case rpc(mim(), ejabberd_sm, sm_backend, []) of + ejabberd_sm_cets -> + escalus:init_per_testcase(CaseName, Config); + _ -> + {skip, "Only for cets preset"} + end; init_per_testcase(CaseName, Config) when CaseName == delete_old_users_vhost orelse CaseName == stats_global @@ -1430,6 +1438,12 @@ remove_expired_messages_test(Config) -> 2 = length(SecondList) end). +cets_tables_are_in_mnesia_info(Config) -> + {Out, 0} = mongooseimctl("mnesia", ["info"], Config), + Lines = binary:split(iolist_to_binary(Out), <<"\n">>, [global]), + [_Line] = [L || <<"table=cets_session", _/binary>> = L <- Lines], + ok. 
+ %%----------------------------------------------------------------- %% Helpers %%----------------------------------------------------------------- diff --git a/big_tests/tests/sm_SUITE.erl b/big_tests/tests/sm_SUITE.erl index 99bae20b4f2..cb6125d9a0a 100644 --- a/big_tests/tests/sm_SUITE.erl +++ b/big_tests/tests/sm_SUITE.erl @@ -144,7 +144,7 @@ init_per_group(Group, Config) when Group =:= parallel_unacknowledged_message_hoo Group =:= manual_ack_freq_long_session_timeout; Group =:= parallel_manual_ack_freq_1; Group =:= manual_ack_freq_2 -> - dynamic_modules:ensure_modules(host_type(), required_modules(group, Group)), + dynamic_modules:ensure_modules(host_type(), required_modules(Config, group, Group)), Config; init_per_group(stale_h, Config) -> Config; @@ -153,18 +153,18 @@ init_per_group(stream_mgmt_disabled, Config) -> rpc(mim(), mnesia, delete_table, [sm_session]), Config; init_per_group(Group, Config) -> - dynamic_modules:ensure_modules(host_type(), required_modules(group, Group)), + dynamic_modules:ensure_modules(host_type(), required_modules(Config, group, Group)), Config. end_per_group(_Group, _Config) -> ok. 
init_per_testcase(resume_expired_session_returns_correct_h = CN, Config) -> - dynamic_modules:ensure_modules(host_type(), required_modules(testcase, CN)), + dynamic_modules:ensure_modules(host_type(), required_modules(Config, testcase, CN)), escalus:init_per_testcase(CN, Config); init_per_testcase(CN, Config) when CN =:= gc_repeat_after_never_means_no_cleaning; CN =:= gc_repeat_after_timeout_does_clean -> - dynamic_modules:ensure_modules(host_type(), required_modules(testcase, CN)), + dynamic_modules:ensure_modules(host_type(), required_modules(Config, testcase, CN)), Config2 = register_some_smid_h(Config), escalus:init_per_testcase(CN, Config2); init_per_testcase(server_requests_ack_freq_2 = CN, Config) -> @@ -189,10 +189,10 @@ end_per_testcase(CaseName, Config) -> %% Module configuration per group (in case of stale_h group it is per testcase) -required_modules(Scope, Name) -> +required_modules(Config, Scope, Name) -> SMConfig = case required_sm_opts(Scope, Name) of stopped -> stopped; - ExtraOpts -> maps:merge(common_sm_opts(), ExtraOpts) + ExtraOpts -> maps:merge(common_sm_opts(Config), ExtraOpts) end, [{mod_stream_management, config_parser_helper:mod_config(mod_stream_management, SMConfig)}, {mod_offline, config_parser_helper:mod_config(mod_offline, #{})}]. @@ -219,8 +219,9 @@ required_sm_opts(testcase, gc_repeat_after_never_means_no_cleaning) -> required_sm_opts(testcase, gc_repeat_after_timeout_does_clean) -> #{stale_h => stale_h(?SHORT_TIMEOUT, ?SHORT_TIMEOUT)}. -common_sm_opts() -> - #{buffer_max => ?SMALL_SM_BUFFER}. +common_sm_opts(Config) -> + Backend = ct_helper:get_preset_var(Config, stream_management_backend, mnesia), + #{buffer_max => ?SMALL_SM_BUFFER, backend => Backend}. 
stale_h(RepeatAfter, Geriatric) -> #{enabled => true, diff --git a/rebar.config b/rebar.config index d2e5c9ea37e..5d152b78937 100644 --- a/rebar.config +++ b/rebar.config @@ -80,6 +80,7 @@ {cache_tab, "1.0.30"}, {segmented_cache, "0.2.0"}, {worker_pool, "6.0.1"}, + {cets, {git, "https://github.com/arcusfelis/cets.git", {branch, "main"}}}, %%% HTTP tools {graphql, {git, "https://github.com/esl/graphql-erlang.git", {branch, "master"}}}, @@ -169,11 +170,17 @@ {erl_opts, [{d, 'PROD_NODE'}]} ]}, %% development nodes {mim1, [{relx, [ {overlay_vars, "rel/mim1.vars-toml.config"}, - {overlay, [{template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, + {overlay, [ + {copy, "rel/files/cets_disco.txt", "etc/cets_disco.txt"}, + {template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, {mim2, [{relx, [ {overlay_vars, "rel/mim2.vars-toml.config"}, - {overlay, [{template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, + {overlay, [ + {copy, "rel/files/cets_disco.txt", "etc/cets_disco.txt"}, + {template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, {mim3, [{relx, [ {overlay_vars, "rel/mim3.vars-toml.config"}, - {overlay, [{template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, + {overlay, [ + {copy, "rel/files/cets_disco.txt", "etc/cets_disco.txt"}, + {template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, {fed1, [{relx, [ {overlay_vars, "rel/fed1.vars-toml.config"}, {overlay, [{template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, {reg1, [{relx, [ {overlay_vars, "rel/reg1.vars-toml.config"}, diff --git a/rebar.lock b/rebar.lock index f2c8dcf1e5b..b5378ee23d0 100644 --- a/rebar.lock +++ b/rebar.lock @@ -6,6 +6,10 @@ {<<"bear">>,{pkg,<<"bear">>,<<"1.0.0">>},1}, {<<"cache_tab">>,{pkg,<<"cache_tab">>,<<"1.0.30">>},0}, {<<"certifi">>,{pkg,<<"certifi">>,<<"2.9.0">>},1}, + {<<"cets">>, + {git,"https://github.com/arcusfelis/cets.git", + {ref,"6f8b79889844bf2f3778104bdadfba5ee1efc5fc"}}, + 
0}, {<<"cowboy">>,{pkg,<<"cowboy">>,<<"2.9.0">>},0}, {<<"cowboy_swagger">>,{pkg,<<"cowboy_swagger">>,<<"2.5.1">>},0}, {<<"cowlib">>,{pkg,<<"cowlib">>,<<"2.11.0">>},1}, diff --git a/rel/files/cets_disco.txt b/rel/files/cets_disco.txt new file mode 100644 index 00000000000..428fc58a86b --- /dev/null +++ b/rel/files/cets_disco.txt @@ -0,0 +1,3 @@ +mongooseim@localhost +ejabberd2@localhost +mongooseim3@localhost diff --git a/src/ejabberd_ctl.erl b/src/ejabberd_ctl.erl index 4df6e3571b7..5c3244194a9 100644 --- a/src/ejabberd_ctl.erl +++ b/src/ejabberd_ctl.erl @@ -135,6 +135,7 @@ process(["mnesia"]) -> ?STATUS_SUCCESS; process(["mnesia", "info"]) -> mnesia:info(), + cets_info(), ?STATUS_SUCCESS; process(["graphql", Arg]) when is_list(Arg) -> Doc = list_to_binary(Arg), @@ -899,3 +900,16 @@ get_dist_proto() -> {ok, [Proto]} -> Proto; _ -> "inet_tcp" end. + +cets_info() -> + Tables = cets_discovery:info(mongoose_cets_discovery), + cets_info(Tables). + +cets_info([]) -> + ok; +cets_info(Tables) -> + ?PRINT("CETS tables:~n", []), + [cets_table_info(Table) || Table <- Tables]. + +cets_table_info(#{memory := Memory, size := Size, nodes := Nodes, table := Tab}) -> + ?PRINT("table=~0p size=~p memory_words=~0p nodes=~0p~n", [Tab, Size, Memory, Nodes]). diff --git a/src/ejabberd_sm.erl b/src/ejabberd_sm.erl index 61db684c5ec..3c21c69fa75 100644 --- a/src/ejabberd_sm.erl +++ b/src/ejabberd_sm.erl @@ -106,7 +106,7 @@ }. -type info() :: #{info_key() => any()}. --type backend() :: ejabberd_sm_mnesia | ejabberd_sm_redis. +-type backend() :: ejabberd_sm_mnesia | ejabberd_sm_redis | ejabberd_sm_cets. -type close_reason() :: resumed | normal | replaced. -type info_key() :: atom(). diff --git a/src/ejabberd_sm_cets.erl b/src/ejabberd_sm_cets.erl new file mode 100644 index 00000000000..91faaa25b20 --- /dev/null +++ b/src/ejabberd_sm_cets.erl @@ -0,0 +1,139 @@ +-module(ejabberd_sm_cets). + +-behavior(ejabberd_sm_backend). + +-include("mongoose.hrl"). +-include("session.hrl"). 
+ +-export([init/1, + get_sessions/0, + get_sessions/1, + get_sessions/2, + get_sessions/3, + create_session/4, + update_session/4, + delete_session/4, + cleanup/1, + total_count/0, + unique_count/0]). + +-define(TABLE, cets_session). + +-spec init(list()) -> any(). +init(_Opts) -> + cets:start(?TABLE, #{}), + cets_discovery:add_table(mongoose_cets_discovery, ?TABLE). + +-spec get_sessions() -> [ejabberd_sm:session()]. +get_sessions() -> + tuples_to_sessions(ets:tab2list(?TABLE)). + +-spec get_sessions(jid:lserver()) -> [ejabberd_sm:session()]. +get_sessions(Server) -> + R = {{Server, '_', '_', '_'}, '_', '_'}, + Xs = ets:select(?TABLE, [{R, [], ['$_']}]), + tuples_to_sessions(Xs). + +-spec get_sessions(jid:luser(), jid:lserver()) -> [ejabberd_sm:session()]. +get_sessions(User, Server) -> + R = {{Server, User, '_', '_'}, '_', '_'}, + Xs = ets:select(?TABLE, [{R, [], ['$_']}]), + tuples_to_sessions(Xs). + +-spec get_sessions(jid:luser(), jid:lserver(), jid:lresource()) -> + [ejabberd_sm:session()]. +get_sessions(User, Server, Resource) -> + R = {{Server, User, Resource, '_'}, '_', '_'}, + Xs = ets:select(?TABLE, [{R, [], ['$_']}]), + %% TODO these sessions should be deduplicated. + %% It is possible, that after merging two cets tables we could end up + %% with sessions from two nodes for the same full jid. + %% One of the sessions must be killed. + %% We can detect duplicates on the merging step or on reading (or both). + tuples_to_sessions(Xs). + +-spec create_session(User :: jid:luser(), + Server :: jid:lserver(), + Resource :: jid:lresource(), + Session :: ejabberd_sm:session()) -> ok | {error, term()}. 
+create_session(User, Server, Resource, Session) -> + case get_sessions(User, Server, Resource) of + [] -> + cets:insert(?TABLE, session_to_tuple(Session)); + Sessions when is_list(Sessions) -> + %% Fix potential race condition during XMPP bind, where + %% multiple calls (> 2) to ejabberd_sm:open_session + %% have been made, resulting in >1 sessions for this resource + %% XXX Why do we need that exactly? + %% Sessions are open from c2s and that specific process is updating + %% its session info. Adding info from other processes would cause some + %% strange bugs. On another hand, there is very limited usage + %% of that info field, so nothing would probably break if + %% we keep calling merge_info (and it would make ejabberd_sm_SUITE happy). + MergedSession = mongoose_session:merge_info + (Session, hd(lists:sort(Sessions))), + cets:insert(?TABLE, session_to_tuple(MergedSession)) + end. + +-spec update_session(User :: jid:luser(), + Server :: jid:lserver(), + Resource :: jid:lresource(), + Session :: ejabberd_sm:session()) -> ok | {error, term()}. +update_session(_User, _Server, _Resource, Session) -> + cets:insert(?TABLE, session_to_tuple(Session)). + +-spec delete_session(ejabberd_sm:sid(), + User :: jid:luser(), + Server :: jid:lserver(), + Resource :: jid:lresource()) -> ok. +delete_session(SID, User, Server, Resource) -> + cets:delete(?TABLE, make_key(User, Server, Resource, SID)). + +-spec cleanup(atom()) -> any(). 
+cleanup(Node) -> + %% TODO this could be optimized, we don't need to replicate deletes, + %% we could just call cleanup on each node (but calling the hook only + %% on one of the nodes) + KeyPattern = {'_', '_', '_', {'_', '$1'}}, + Guard = {'==', {node, '$1'}, Node}, + R = {KeyPattern, '_', '_'}, + cets:sync(?TABLE), + Tuples = ets:select(?TABLE, [{R, [Guard], ['$_']}]), + Keys = lists:map(fun({Key, _, _} = Tuple) -> + Session = tuple_to_session(Tuple), + ejabberd_sm:run_session_cleanup_hook(Session), + Key + end, Tuples), + cets:delete_many(?TABLE, Keys). + +-spec total_count() -> integer(). +total_count() -> + ets:info(?TABLE, size). + +%% Counts merged by US +-spec unique_count() -> integer(). +unique_count() -> + compute_unique(ets:first(?TABLE), 0). + +compute_unique('$end_of_table', Sum) -> + Sum; +compute_unique({S, U, _, _} = Key, Sum) -> + Key2 = ets:next(?TABLE, Key), + case Key2 of + {S, U, _, _} -> + compute_unique(Key2, Sum); + _ -> + compute_unique(Key2, Sum + 1) + end. + +session_to_tuple(#session{sid = SID, usr = {U, S, R}, priority = Prio, info = Info}) -> + {make_key(U, S, R, SID), Prio, Info}. + +make_key(User, Server, Resource, SID) -> + {Server, User, Resource, SID}. + +tuple_to_session({{S, U, R, SID}, Prio, Info}) -> + #session{sid = SID, usr = {U, S, R}, us = {U, S}, priority = Prio, info = Info}. + +tuples_to_sessions(Xs) -> + [tuple_to_session(X) || X <- Xs]. 
diff --git a/src/ejabberd_sup.erl b/src/ejabberd_sup.erl index ed82cdd5579..5b1cb95812d 100644 --- a/src/ejabberd_sup.erl +++ b/src/ejabberd_sup.erl @@ -153,8 +153,16 @@ init([]) -> {pg, {pg, start_link, [mim_scope]}, permanent, infinity, supervisor, [pg]}, + ConfigDir = filename:dirname(mongoose_config:get_config_path()), + DiscoFile = filename:join(ConfigDir, "cets_disco.txt"), + DiscoOpts = #{name => mongoose_cets_discovery, disco_file => DiscoFile}, + CetsDisco = + {cets_discovery, + {cets_discovery, start_link, [DiscoOpts]}, + permanent, infinity, supervisor, [cets_discovery]}, {ok, {{one_for_one, 10, 1}, - [PG, + [CetsDisco, + PG, Hooks, Cleaner, SMBackendSupervisor, diff --git a/src/mongooseim.app.src b/src/mongooseim.app.src index 03b6ffaa6de..80f93c1d620 100644 --- a/src/mongooseim.app.src +++ b/src/mongooseim.app.src @@ -51,7 +51,8 @@ cowboy_swagger, tomerl, flatlog, - segmented_cache + segmented_cache, + cets ]}, {env, []}, {mod, {ejabberd_app, []}}]}. diff --git a/src/stream_management/mod_stream_management_cets.erl b/src/stream_management/mod_stream_management_cets.erl new file mode 100644 index 00000000000..e7d105b2cb4 --- /dev/null +++ b/src/stream_management/mod_stream_management_cets.erl @@ -0,0 +1,141 @@ +-module(mod_stream_management_cets). +-behaviour(mod_stream_management_backend). +-behaviour(gen_server). + +-include("mongoose.hrl"). +-include("jlib.hrl"). +-include_lib("stdlib/include/ms_transform.hrl"). + +-export([init/2, + register_smid/3, + unregister_smid/2, + get_sid/2]). + +-export([read_stale_h/2, + write_stale_h/3, + delete_stale_h/2]). + +%% Internal exports +-export([start_link/1]). + +%% gen_server callbacks +-export([init/1, handle_call/3, handle_cast/2, handle_info/2]). + +-ignore_xref([start_link/1]). + +-record(smgc_state, + {gc_repeat_after :: non_neg_integer(), + gc_geriatric :: non_neg_integer() }). + +-define(TABLE, cets_strm_man). +-define(TABLE_H, cets_strm_man_h). 
+ +init(_HostType, Opts = #{stale_h := StaleOpts}) -> + cets:start(?TABLE, #{}), + cets_discovery:add_table(mongoose_cets_discovery, ?TABLE), + maybe_init_stale_h(StaleOpts), + ok. + +maybe_init_stale_h(StaleOpts = #{enabled := true}) -> + cets:start(?TABLE_H, #{}), + cets_discovery:add_table(mongoose_cets_discovery, ?TABLE_H), + start_cleaner(StaleOpts); +maybe_init_stale_h(_) -> ok. + +-spec register_smid(HostType, SMID, SID) -> + ok | {error, term()} when + HostType :: mongooseim:host_type(), + SMID :: mod_stream_management:smid(), + SID :: ejabberd_sm:sid(). +register_smid(_HostType, SMID, SID) -> + cets:insert(?TABLE, [{{sid, SID}, SMID}, {{smid, SMID}, SID}]). + +-spec unregister_smid(mongooseim:host_type(), ejabberd_sm:sid()) -> + {ok, SMID :: mod_stream_management:smid()} | {error, smid_not_found}. +unregister_smid(_HostType, SID) -> + case ets:lookup(?TABLE, {sid, SID}) of + [] -> + {error, smid_not_found}; + [{_, SMID}] -> + cets:delete_many(?TABLE, [{sid, SID}, {smid, SMID}]), + {ok, SMID} + end. + +-spec get_sid(mongooseim:host_type(), mod_stream_management:smid()) -> + {sid, ejabberd_sm:sid()} | {error, smid_not_found}. +get_sid(_HostType, SMID) -> + case ets:lookup(?TABLE, {smid, SMID}) of + [] -> + {error, smid_not_found}; + [{_, SID}] -> + {sid, SID} + end. + +%% stale_h functions + +-spec read_stale_h(HostType, SMID) -> + {stale_h, non_neg_integer()} | {error, smid_not_found} when + HostType :: mongooseim:host_type(), + SMID :: mod_stream_management:smid(). +read_stale_h(_HostType, SMID) -> + case ets:lookup(?TABLE_H, SMID) of + [] -> + {error, smid_not_found}; + [{_, H, _}] -> + {stale_h, H} + end. + +-spec write_stale_h(HostType, SMID, H) -> ok | {error, any()} when + HostType :: mongooseim:host_type(), + SMID :: mod_stream_management:smid(), + H :: non_neg_integer(). +write_stale_h(_HostType, SMID, H) -> + Stamp = erlang:monotonic_time(second), + cets:insert(?TABLE_H, {SMID, H, Stamp}). 
+ +-spec delete_stale_h(HostType, SMID) -> ok | {error, any()} when + HostType :: mongooseim:host_type(), + SMID :: mod_stream_management:smid(). +delete_stale_h(_HostType, SMID) -> + cets:delete(?TABLE_H, SMID). + +%% stale_h cleaning logic + +start_cleaner(Opts) -> + MFA = {?MODULE, start_link, [Opts]}, + ChildSpec = {stream_management_stale_h, MFA, permanent, 5000, worker, [?MODULE]}, + %% TODO cleaner should be a service + ejabberd_sup:start_child(ChildSpec). + +start_link(Opts) -> + gen_server:start_link({local, stream_management_stale_h}, ?MODULE, Opts, []). + +init(#{repeat_after := RepeatAfter, geriatric := GeriatricAge}) -> + State = #smgc_state{gc_repeat_after = RepeatAfter, + gc_geriatric = GeriatricAge}, + schedule_check(State), + {ok, State}. + +handle_call(Msg, From, State) -> + ?UNEXPECTED_CALL(Msg, From), + {reply, ok, State}. + +handle_cast(Msg, State) -> + ?UNEXPECTED_CAST(Msg), + {noreply, State}. + +handle_info(check, #smgc_state{gc_geriatric = GeriatricAge} = State) -> + clear_table(GeriatricAge), + schedule_check(State), + {noreply, State}; +handle_info(Info, State) -> + ?UNEXPECTED_INFO(Info), + {noreply, State}. + +schedule_check(#smgc_state{gc_repeat_after = RepeatAfter}) -> + erlang:send_after(RepeatAfter * 1000, self(), check). + +clear_table(GeriatricAge) -> + TimeToDie = erlang:monotonic_time(second) - GeriatricAge, + MS = ets:fun2ms(fun({_, _, S}) when S < TimeToDie -> true end), + ets:select_delete(?TABLE_H, MS). diff --git a/test/ejabberd_sm_SUITE.erl b/test/ejabberd_sm_SUITE.erl index c24d473948c..db9e66dac3a 100644 --- a/test/ejabberd_sm_SUITE.erl +++ b/test/ejabberd_sm_SUITE.erl @@ -13,7 +13,7 @@ -import(config_parser_helper, [default_config/1]). -all() -> [{group, mnesia}, {group, redis}]. +all() -> [{group, mnesia}, {group, redis}, {group, cets}]. init_per_suite(C) -> {ok, _} = application:ensure_all_started(jid), @@ -33,7 +33,8 @@ end_per_suite(C) -> groups() -> [{mnesia, [], tests()}, - {redis, [], tests()}]. 
+ {redis, [], tests()}, + {cets, [], tests()}]. tests() -> [open_session, @@ -64,7 +65,11 @@ init_per_group(mnesia, Config) -> ok = mnesia:start(), [{backend, ejabberd_sm_mnesia} | Config]; init_per_group(redis, Config) -> - init_redis_group(is_redis_running(), Config). + init_redis_group(is_redis_running(), Config); +init_per_group(cets, Config) -> + DiscoOpts = #{name => mongoose_cets_discovery, disco_file => "does_not_exist.txt"}, + {ok, Pid} = cets_discovery:start(DiscoOpts), + [{backend, ejabberd_sm_cets}, {cets_disco_pid, Pid} | Config]. init_redis_group(true, Config) -> Self = self(), @@ -86,7 +91,10 @@ end_per_group(mnesia, Config) -> mnesia:stop(), mnesia:delete_schema([node()]), Config; -end_per_group(_, Config) -> +end_per_group(cets, Config) -> + exit(proplists:get_value(cets_disco_pid, Config), kill), + Config; +end_per_group(redis, Config) -> whereis(test_helper) ! stop, Config. @@ -447,6 +455,8 @@ get_fun_for_unique_count(ejabberd_sm_mnesia) -> fun() -> mnesia:abort({badarg,[session,{{1442,941593,580189},list_to_pid("<0.23291.6>")}]}) end; +get_fun_for_unique_count(ejabberd_sm_cets) -> + fun() -> error(oops) end; get_fun_for_unique_count(ejabberd_sm_redis) -> fun() -> %% The code below is on purpose, it's to crash with badarg reason @@ -493,6 +503,8 @@ verify_session_opened(C, Sid, USR) -> do_verify_session_opened(ejabberd_sm_mnesia, Sid, {U, S, R} = USR) -> general_session_check(ejabberd_sm_mnesia, Sid, USR, U, S, R); +do_verify_session_opened(ejabberd_sm_cets, Sid, {U, S, R} = USR) -> + general_session_check(ejabberd_sm_cets, Sid, USR, U, S, R); do_verify_session_opened(ejabberd_sm_redis, Sid, {U, S, R} = USR) -> UHash = iolist_to_binary(hash(U, S, R, Sid)), Hashes = mongoose_redis:cmd(["SMEMBERS", n(node())]), @@ -521,7 +533,9 @@ clean_sessions(C) -> ejabberd_sm_mnesia -> mnesia:clear_table(session); ejabberd_sm_redis -> - mongoose_redis:cmd(["FLUSHALL"]) + mongoose_redis:cmd(["FLUSHALL"]); + ejabberd_sm_cets -> + 
ets:delete_all_objects(cets_session) end. generate_random_user(S) -> @@ -603,6 +617,8 @@ setup_sm(Config) -> ejabberd_sm_redis -> mongoose_redis:cmd(["FLUSHALL"]); ejabberd_sm_mnesia -> + ok; + ejabberd_sm_cets -> ok end. @@ -622,7 +638,8 @@ opts(Config) -> {sm_backend, sm_backend(?config(backend, Config))}]. sm_backend(ejabberd_sm_redis) -> redis; -sm_backend(ejabberd_sm_mnesia) -> mnesia. +sm_backend(ejabberd_sm_mnesia) -> mnesia; +sm_backend(ejabberd_sm_cets) -> cets. set_meck() -> meck:expect(gen_hook, add_handler, fun(_, _, _, _, _) -> ok end), diff --git a/tools/build-releases.sh b/tools/build-releases.sh index 27765e874c3..2df2cec6e18 100755 --- a/tools/build-releases.sh +++ b/tools/build-releases.sh @@ -31,6 +31,7 @@ function try_copy_release --exclude rel/mongooseim/Mnesia.* \ --exclude rel/mongooseim/var \ --exclude rel/mongooseim/log \ + --exclude rel/mongooseim/etc/cets_disco.txt \ -al _build/$FIRST_NODE/ _build/$NODE/ ./tools/test_runner/apply_templates.sh "$NODE" "$(pwd)/_build/$NODE/" fi diff --git a/tools/test_runner/apply_templates.erl b/tools/test_runner/apply_templates.erl index cef57a23d42..e18bafaf192 100644 --- a/tools/test_runner/apply_templates.erl +++ b/tools/test_runner/apply_templates.erl @@ -10,7 +10,7 @@ main([NodeAtom, BuildDirAtom]) -> log("BuildDirAtom=~p~n", [BuildDirAtom]), BuildDir = atom_to_list(BuildDirAtom), RelDir = BuildDir ++ "/rel/mongooseim", - Templates = templates(RelDir), + Templates = templates(RelDir, NodeAtom), log("Templates:~n~p~n", [Templates]), Vars0 = overlay_vars(NodeAtom), Vars = Vars0#{output_dir => list_to_binary(RelDir)}, @@ -39,8 +39,9 @@ ensure_binary_strings(Vars) -> end, Vars). %% Based on rebar.config overlay section -templates(RelDir) -> - simple_templates(RelDir) ++ erts_templates(RelDir). +templates(RelDir, NodeAtom) -> + simple_templates(RelDir) ++ erts_templates(RelDir) + ++ disco_template(RelDir, NodeAtom). 
simple_templates(RelDir) -> [{In, RelDir ++ "/" ++ Out} || {In, Out} <- simple_templates()]. @@ -60,6 +61,14 @@ erts_templates(RelDir) -> ErtsDirs = filelib:wildcard(RelDir ++ "/erts-*"), [{"rel/files/nodetool", ErtsDir ++ "/bin/nodetool"} || ErtsDir <- ErtsDirs]. +disco_template(RelDir, NodeAtom) -> + case lists:member(NodeAtom, [mim1, mim2, mim3]) of + true -> + [{"rel/files/cets_disco.txt", RelDir ++ "/etc/cets_disco.txt"}]; + false -> + [] + end. + render_template(In, Out, Vars) -> BinIn = bbmustache:parse_file(In), %% Do render twice to allow templates in variables From 839e6065be1438384299482625922b2b5803001e Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 19 Apr 2022 10:54:23 +0200 Subject: [PATCH 002/161] Use mongoose_cleaner for mod_stream_management_cets --- src/ejabberd_sm_cets.erl | 2 +- .../mod_stream_management_cets.erl | 60 ++++--------------- 2 files changed, 14 insertions(+), 48 deletions(-) diff --git a/src/ejabberd_sm_cets.erl b/src/ejabberd_sm_cets.erl index 91faaa25b20..a307699a66c 100644 --- a/src/ejabberd_sm_cets.erl +++ b/src/ejabberd_sm_cets.erl @@ -19,7 +19,7 @@ -define(TABLE, cets_session). --spec init(list()) -> any(). +-spec init(map()) -> any(). init(_Opts) -> cets:start(?TABLE, #{}), cets_discovery:add_table(mongoose_cets_discovery, ?TABLE). diff --git a/src/stream_management/mod_stream_management_cets.erl b/src/stream_management/mod_stream_management_cets.erl index e7d105b2cb4..49fe98f7ef5 100644 --- a/src/stream_management/mod_stream_management_cets.erl +++ b/src/stream_management/mod_stream_management_cets.erl @@ -1,6 +1,5 @@ -module(mod_stream_management_cets). -behaviour(mod_stream_management_backend). --behaviour(gen_server). -include("mongoose.hrl"). -include("jlib.hrl"). @@ -15,32 +14,24 @@ write_stale_h/3, delete_stale_h/2]). -%% Internal exports --export([start_link/1]). - -%% gen_server callbacks --export([init/1, handle_call/3, handle_cast/2, handle_info/2]). +-export([clear_table/2]). 
-ignore_xref([start_link/1]). --record(smgc_state, - {gc_repeat_after :: non_neg_integer(), - gc_geriatric :: non_neg_integer() }). - -define(TABLE, cets_strm_man). -define(TABLE_H, cets_strm_man_h). -init(_HostType, Opts = #{stale_h := StaleOpts}) -> +init(HostType, #{stale_h := StaleOpts}) -> cets:start(?TABLE, #{}), cets_discovery:add_table(mongoose_cets_discovery, ?TABLE), - maybe_init_stale_h(StaleOpts), + maybe_init_stale_h(HostType, StaleOpts), ok. -maybe_init_stale_h(StaleOpts = #{enabled := true}) -> +maybe_init_stale_h(HostType, StaleOpts = #{enabled := true}) -> cets:start(?TABLE_H, #{}), cets_discovery:add_table(mongoose_cets_discovery, ?TABLE_H), - start_cleaner(StaleOpts); -maybe_init_stale_h(_) -> ok. + start_cleaner(HostType, StaleOpts); +maybe_init_stale_h(_, _) -> ok. -spec register_smid(HostType, SMID, SID) -> ok | {error, term()} when @@ -101,41 +92,16 @@ delete_stale_h(_HostType, SMID) -> %% stale_h cleaning logic -start_cleaner(Opts) -> - MFA = {?MODULE, start_link, [Opts]}, - ChildSpec = {stream_management_stale_h, MFA, permanent, 5000, worker, [?MODULE]}, +start_cleaner(HostType, #{repeat_after := Interval, geriatric := TTL}) -> + Name = gen_mod:get_module_proc(HostType, stream_management_stale_h), + WOpts = #{host_type => HostType, action => fun ?MODULE:clear_table/2, + opts => TTL, interval => Interval}, + MFA = {mongoose_collector, start_link, [Name, WOpts]}, + ChildSpec = {Name, MFA, permanent, 5000, worker, [?MODULE]}, %% TODO cleaner should be a service ejabberd_sup:start_child(ChildSpec). -start_link(Opts) -> - gen_server:start_link({local, stream_management_stale_h}, ?MODULE, Opts, []). - -init(#{repeat_after := RepeatAfter, geriatric := GeriatricAge}) -> - State = #smgc_state{gc_repeat_after = RepeatAfter, - gc_geriatric = GeriatricAge}, - schedule_check(State), - {ok, State}. - -handle_call(Msg, From, State) -> - ?UNEXPECTED_CALL(Msg, From), - {reply, ok, State}. 
- -handle_cast(Msg, State) -> - ?UNEXPECTED_CAST(Msg), - {noreply, State}. - -handle_info(check, #smgc_state{gc_geriatric = GeriatricAge} = State) -> - clear_table(GeriatricAge), - schedule_check(State), - {noreply, State}; -handle_info(Info, State) -> - ?UNEXPECTED_INFO(Info), - {noreply, State}. - -schedule_check(#smgc_state{gc_repeat_after = RepeatAfter}) -> - erlang:send_after(RepeatAfter * 1000, self(), check). - -clear_table(GeriatricAge) -> +clear_table(_HostType, GeriatricAge) -> TimeToDie = erlang:monotonic_time(second) - GeriatricAge, MS = ets:fun2ms(fun({_, _, S}) when S < TimeToDie -> true end), ets:select_delete(?TABLE_H, MS). From 726f361333df706374d226dbd127d0f9315939ed Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 6 May 2022 10:29:54 +0200 Subject: [PATCH 003/161] Update cets --- rebar.config | 2 +- rebar.lock | 4 ++-- src/stream_management/mod_stream_management_cets.erl | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/rebar.config b/rebar.config index 5d152b78937..5c8fbf53b2a 100644 --- a/rebar.config +++ b/rebar.config @@ -80,7 +80,7 @@ {cache_tab, "1.0.30"}, {segmented_cache, "0.2.0"}, {worker_pool, "6.0.1"}, - {cets, {git, "https://github.com/arcusfelis/cets.git", {branch, "main"}}}, + {cets, {git, "https://github.com/esl/cets.git", {branch, "rebased"}}}, %%% HTTP tools {graphql, {git, "https://github.com/esl/graphql-erlang.git", {branch, "master"}}}, diff --git a/rebar.lock b/rebar.lock index b5378ee23d0..c22cd5d76d2 100644 --- a/rebar.lock +++ b/rebar.lock @@ -7,8 +7,8 @@ {<<"cache_tab">>,{pkg,<<"cache_tab">>,<<"1.0.30">>},0}, {<<"certifi">>,{pkg,<<"certifi">>,<<"2.9.0">>},1}, {<<"cets">>, - {git,"https://github.com/arcusfelis/cets.git", - {ref,"6f8b79889844bf2f3778104bdadfba5ee1efc5fc"}}, + {git,"https://github.com/esl/cets.git", + {ref,"ebe123a44c35ebbe9ab06c453840bac45467c510"}}, 0}, {<<"cowboy">>,{pkg,<<"cowboy">>,<<"2.9.0">>},0}, {<<"cowboy_swagger">>,{pkg,<<"cowboy_swagger">>,<<"2.5.1">>},0}, diff --git 
a/src/stream_management/mod_stream_management_cets.erl b/src/stream_management/mod_stream_management_cets.erl index 49fe98f7ef5..1278bfafdf3 100644 --- a/src/stream_management/mod_stream_management_cets.erl +++ b/src/stream_management/mod_stream_management_cets.erl @@ -39,7 +39,7 @@ maybe_init_stale_h(_, _) -> ok. SMID :: mod_stream_management:smid(), SID :: ejabberd_sm:sid(). register_smid(_HostType, SMID, SID) -> - cets:insert(?TABLE, [{{sid, SID}, SMID}, {{smid, SMID}, SID}]). + cets:insert_many(?TABLE, [{{sid, SID}, SMID}, {{smid, SMID}, SID}]). -spec unregister_smid(mongooseim:host_type(), ejabberd_sm:sid()) -> {ok, SMID :: mod_stream_management:smid()} | {error, smid_not_found}. From e4c0bff46aefbd80235f0f3cf9fcf0cac7f0c13d Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 18 May 2022 14:33:51 +0200 Subject: [PATCH 004/161] Add a note about partially bound key select --- src/ejabberd_sm_cets.erl | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/ejabberd_sm_cets.erl b/src/ejabberd_sm_cets.erl index a307699a66c..510ef0af6b6 100644 --- a/src/ejabberd_sm_cets.erl +++ b/src/ejabberd_sm_cets.erl @@ -30,6 +30,11 @@ get_sessions() -> -spec get_sessions(jid:lserver()) -> [ejabberd_sm:session()]. get_sessions(Server) -> + %% This is not a full table scan. From the ETS docs: + %% For ordered_set a partially bound key will limit the traversal to only + %% scan a subset of the table based on term order. + %% A partially bound key is either a list or a tuple with + %% a prefix that is fully bound. R = {{Server, '_', '_', '_'}, '_', '_'}, Xs = ets:select(?TABLE, [{R, [], ['$_']}]), tuples_to_sessions(Xs). @@ -98,6 +103,7 @@ cleanup(Node) -> Guard = {'==', {node, '$1'}, Node}, R = {KeyPattern, '_', '_'}, cets:sync(?TABLE), + %% This is a full table scan, but cleanup is rare. 
Tuples = ets:select(?TABLE, [{R, [Guard], ['$_']}]), Keys = lists:map(fun({Key, _, _} = Tuple) -> Session = tuple_to_session(Tuple), From 50f8ece66553a00c5c138e70db124365bfde1c02 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 21 Mar 2023 11:18:39 +0100 Subject: [PATCH 005/161] Make start_common function with common logic --- src/inbox/mod_inbox.erl | 5 +---- src/mongoose_collector.erl | 8 ++++++++ .../mod_stream_management_cets.erl | 20 ++++++++----------- .../mod_stream_management_mnesia.erl | 7 ++----- 4 files changed, 19 insertions(+), 21 deletions(-) diff --git a/src/inbox/mod_inbox.erl b/src/inbox/mod_inbox.erl index cc1490a4adc..6d119411870 100644 --- a/src/inbox/mod_inbox.erl +++ b/src/inbox/mod_inbox.erl @@ -170,12 +170,9 @@ process_inbox_boxes(Config = #{boxes := Boxes}) -> %% Cleaner gen_server callbacks start_cleaner(HostType, #{bin_ttl := TTL, bin_clean_after := Interval}) -> - Name = gen_mod:get_module_proc(HostType, ?MODULE), WOpts = #{host_type => HostType, action => fun mod_inbox_api:flush_global_bin/2, opts => TTL, interval => Interval}, - MFA = {mongoose_collector, start_link, [Name, WOpts]}, - ChildSpec = {Name, MFA, permanent, 5000, worker, [?MODULE]}, - ejabberd_sup:start_child(ChildSpec). + mongoose_collector:start_common(?MODULE, HostType, WOpts). stop_cleaner(HostType) -> Name = gen_mod:get_module_proc(HostType, ?MODULE), diff --git a/src/mongoose_collector.erl b/src/mongoose_collector.erl index 81869273a2e..bf3fc7c05bb 100644 --- a/src/mongoose_collector.erl +++ b/src/mongoose_collector.erl @@ -4,6 +4,7 @@ %% gen_server callbacks -behaviour(gen_server). +-export([start_common/3]). -export([start_link/2, init/1, handle_call/3, handle_cast/2, handle_info/2]). -ignore_xref([start_link/2]). @@ -16,6 +17,13 @@ timer_ref :: undefined | reference() }). +-spec start_common(atom(), mongooseim:host_type(), map()) -> term(). 
+start_common(Module, HostType, WOpts) -> + Name = gen_mod:get_module_proc(HostType, Module), + MFA = {mongoose_collector, start_link, [Name, WOpts]}, + ChildSpec = {Name, MFA, permanent, 5000, worker, [Module, mongoose_collector]}, + ejabberd_sup:start_child(ChildSpec). + start_link(Name, Opts) -> gen_server:start_link({local, Name}, ?MODULE, Opts, []). diff --git a/src/stream_management/mod_stream_management_cets.erl b/src/stream_management/mod_stream_management_cets.erl index 1278bfafdf3..c13bd93b5ff 100644 --- a/src/stream_management/mod_stream_management_cets.erl +++ b/src/stream_management/mod_stream_management_cets.erl @@ -18,8 +18,8 @@ -ignore_xref([start_link/1]). --define(TABLE, cets_strm_man). --define(TABLE_H, cets_strm_man_h). +-define(TABLE, cets_stream_management_session). +-define(TABLE_H, cets_stream_management_stale_h). init(HostType, #{stale_h := StaleOpts}) -> cets:start(?TABLE, #{}), @@ -33,8 +33,7 @@ maybe_init_stale_h(HostType, StaleOpts = #{enabled := true}) -> start_cleaner(HostType, StaleOpts); maybe_init_stale_h(_, _) -> ok. --spec register_smid(HostType, SMID, SID) -> - ok | {error, term()} when +-spec register_smid(HostType, SMID, SID) -> ok when HostType :: mongooseim:host_type(), SMID :: mod_stream_management:smid(), SID :: ejabberd_sm:sid(). @@ -76,7 +75,7 @@ read_stale_h(_HostType, SMID) -> {stale_h, H} end. --spec write_stale_h(HostType, SMID, H) -> ok | {error, any()} when +-spec write_stale_h(HostType, SMID, H) -> ok when HostType :: mongooseim:host_type(), SMID :: mod_stream_management:smid(), H :: non_neg_integer(). @@ -84,7 +83,7 @@ write_stale_h(_HostType, SMID, H) -> Stamp = erlang:monotonic_time(second), cets:insert(?TABLE_H, {SMID, H, Stamp}). --spec delete_stale_h(HostType, SMID) -> ok | {error, any()} when +-spec delete_stale_h(HostType, SMID) -> ok when HostType :: mongooseim:host_type(), SMID :: mod_stream_management:smid(). 
delete_stale_h(_HostType, SMID) -> @@ -93,13 +92,10 @@ delete_stale_h(_HostType, SMID) -> %% stale_h cleaning logic start_cleaner(HostType, #{repeat_after := Interval, geriatric := TTL}) -> - Name = gen_mod:get_module_proc(HostType, stream_management_stale_h), - WOpts = #{host_type => HostType, action => fun ?MODULE:clear_table/2, - opts => TTL, interval => Interval}, - MFA = {mongoose_collector, start_link, [Name, WOpts]}, - ChildSpec = {Name, MFA, permanent, 5000, worker, [?MODULE]}, %% TODO cleaner should be a service - ejabberd_sup:start_child(ChildSpec). + WOpts = #{host_type => HostType, action => fun ?MODULE:clear_table/2, + opts => TTL, interval => timer:seconds(Interval)}, + mongoose_collector:start_common(?MODULE, HostType, WOpts). clear_table(_HostType, GeriatricAge) -> TimeToDie = erlang:monotonic_time(second) - GeriatricAge, diff --git a/src/stream_management/mod_stream_management_mnesia.erl b/src/stream_management/mod_stream_management_mnesia.erl index d441c360afc..143b578a02d 100644 --- a/src/stream_management/mod_stream_management_mnesia.erl +++ b/src/stream_management/mod_stream_management_mnesia.erl @@ -116,13 +116,10 @@ delete_stale_h(_HostType, SMID) -> %% stale_h cleaning logic start_cleaner(HostType, #{repeat_after := Interval, geriatric := TTL}) -> - Name = gen_mod:get_module_proc(HostType, stream_management_stale_h), + %% TODO cleaner should be a service WOpts = #{host_type => HostType, action => fun ?MODULE:clear_table/2, opts => TTL, interval => timer:seconds(Interval)}, - MFA = {mongoose_collector, start_link, [Name, WOpts]}, - ChildSpec = {Name, MFA, permanent, 5000, worker, [?MODULE]}, - %% TODO cleaner should be a service - ejabberd_sup:start_child(ChildSpec). + mongoose_collector:start_common(?MODULE, HostType, WOpts). 
clear_table(_HostType, GeriatricAge) -> TimeToDie = erlang:monotonic_time(second) - GeriatricAge, From 195bb47a749ab2ba00ab3da0bc66025d08f87328 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 22 Mar 2023 10:23:51 +0100 Subject: [PATCH 006/161] Remove cets_info --- src/ejabberd_ctl.erl | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/src/ejabberd_ctl.erl b/src/ejabberd_ctl.erl index 5c3244194a9..4df6e3571b7 100644 --- a/src/ejabberd_ctl.erl +++ b/src/ejabberd_ctl.erl @@ -135,7 +135,6 @@ process(["mnesia"]) -> ?STATUS_SUCCESS; process(["mnesia", "info"]) -> mnesia:info(), - cets_info(), ?STATUS_SUCCESS; process(["graphql", Arg]) when is_list(Arg) -> Doc = list_to_binary(Arg), @@ -900,16 +899,3 @@ get_dist_proto() -> {ok, [Proto]} -> Proto; _ -> "inet_tcp" end. - -cets_info() -> - Tables = cets_discovery:info(mongoose_cets_discovery), - cets_info(Tables). - -cets_info([]) -> - ok; -cets_info(Tables) -> - ?PRINT("CETS tables:~n", []), - [cets_table_info(Table) || Table <- Tables]. - -cets_table_info(#{memory := Memory, size := Size, nodes := Nodes, table := Tab}) -> - ?PRINT("table=~0p size=~p memory_words=~0p nodes=~0p~n", [Tab, Size, Memory, Nodes]). From 048253319eca94f685e6ed835766db25e02743a4 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 29 Mar 2023 11:02:14 +0200 Subject: [PATCH 007/161] Clean SM state locally --- src/ejabberd_sm_cets.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/ejabberd_sm_cets.erl b/src/ejabberd_sm_cets.erl index 510ef0af6b6..c6cd1bb1e3a 100644 --- a/src/ejabberd_sm_cets.erl +++ b/src/ejabberd_sm_cets.erl @@ -94,11 +94,9 @@ update_session(_User, _Server, _Resource, Session) -> delete_session(SID, User, Server, Resource) -> cets:delete(?TABLE, make_key(User, Server, Resource, SID)). +%% cleanup is called on each node in the cluster, when Node is down -spec cleanup(atom()) -> any(). 
cleanup(Node) -> - %% TODO this could be optimized, we don't need to replicate deletes, - %% we could just call cleanup on each node (but calling the hook only - %% on one of the nodes) KeyPattern = {'_', '_', '_', {'_', '$1'}}, Guard = {'==', {node, '$1'}, Node}, R = {KeyPattern, '_', '_'}, @@ -110,7 +108,9 @@ cleanup(Node) -> ejabberd_sm:run_session_cleanup_hook(Session), Key end, Tuples), - cets:delete_many(?TABLE, Keys). + %% We don't need to replicate deletes + %% We remove the local content here + ets:select_delete(?TABLE, [{R, [Guard], [true]}]). -spec total_count() -> integer(). total_count() -> From 76e7703da79dff04ab3747dd62fe95fd56ef3257 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 29 Mar 2023 11:15:19 +0200 Subject: [PATCH 008/161] Remove long comment from create_session function --- src/ejabberd_sm_cets.erl | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/src/ejabberd_sm_cets.erl b/src/ejabberd_sm_cets.erl index c6cd1bb1e3a..7cf96b315d0 100644 --- a/src/ejabberd_sm_cets.erl +++ b/src/ejabberd_sm_cets.erl @@ -66,15 +66,7 @@ create_session(User, Server, Resource, Session) -> [] -> cets:insert(?TABLE, session_to_tuple(Session)); Sessions when is_list(Sessions) -> - %% Fix potential race condition during XMPP bind, where - %% multiple calls (> 2) to ejabberd_sm:open_session - %% have been made, resulting in >1 sessions for this resource - %% XXX Why do we need that exactly? - %% Sessions are open from c2s and that specific process is updating - %% its session info. Adding info from other processes would cause some - %% strange bugs. On another hand, there is very limited usage - %% of that info field, so nothing would probably break if - %% we keep calling merge_info (and it would make ejabberd_sm_SUITE happy). 
+ %% TODO merge_info function would be removed, once MIM-1875 is done MergedSession = mongoose_session:merge_info (Session, hd(lists:sort(Sessions))), cets:insert(?TABLE, session_to_tuple(MergedSession)) From 279d9c7b7a3b02e88394dd45f4ac89b621dd2416 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 29 Mar 2023 11:15:36 +0200 Subject: [PATCH 009/161] Update CETS --- rebar.config | 2 +- rebar.lock | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/rebar.config b/rebar.config index 5c8fbf53b2a..8dab60b621c 100644 --- a/rebar.config +++ b/rebar.config @@ -80,7 +80,7 @@ {cache_tab, "1.0.30"}, {segmented_cache, "0.2.0"}, {worker_pool, "6.0.1"}, - {cets, {git, "https://github.com/esl/cets.git", {branch, "rebased"}}}, + {cets, {git, "https://github.com/esl/cets.git", {branch, "main"}}}, %%% HTTP tools {graphql, {git, "https://github.com/esl/graphql-erlang.git", {branch, "master"}}}, diff --git a/rebar.lock b/rebar.lock index c22cd5d76d2..c010210f6b3 100644 --- a/rebar.lock +++ b/rebar.lock @@ -8,7 +8,7 @@ {<<"certifi">>,{pkg,<<"certifi">>,<<"2.9.0">>},1}, {<<"cets">>, {git,"https://github.com/esl/cets.git", - {ref,"ebe123a44c35ebbe9ab06c453840bac45467c510"}}, + {ref,"d90265d5ce06054e58a588c74df0347bab03c5c2"}}, 0}, {<<"cowboy">>,{pkg,<<"cowboy">>,<<"2.9.0">>},0}, {<<"cowboy_swagger">>,{pkg,<<"cowboy_swagger">>,<<"2.5.1">>},0}, From 49f4ac77b029b86fb5b339a08da107150f3afa58 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 30 Mar 2023 20:20:36 +0200 Subject: [PATCH 010/161] Add callback to stop cleaner in mod_stream_management Otherwise sm_SUITE fails with already_started error --- src/ejabberd_sm_cets.erl | 5 ++--- src/mongoose_collector.erl | 6 +++++- src/stream_management/mod_stream_management.erl | 3 ++- src/stream_management/mod_stream_management_backend.erl | 9 +++++++++ src/stream_management/mod_stream_management_cets.erl | 7 +++++++ src/stream_management/mod_stream_management_mnesia.erl | 7 +++++++ 6 files changed, 32 insertions(+), 5 
deletions(-) diff --git a/src/ejabberd_sm_cets.erl b/src/ejabberd_sm_cets.erl index 7cf96b315d0..c5b51106c27 100644 --- a/src/ejabberd_sm_cets.erl +++ b/src/ejabberd_sm_cets.erl @@ -95,10 +95,9 @@ cleanup(Node) -> cets:sync(?TABLE), %% This is a full table scan, but cleanup is rare. Tuples = ets:select(?TABLE, [{R, [Guard], ['$_']}]), - Keys = lists:map(fun({Key, _, _} = Tuple) -> + lists:foreach(fun({Key, _, _} = Tuple) -> Session = tuple_to_session(Tuple), - ejabberd_sm:run_session_cleanup_hook(Session), - Key + ejabberd_sm:run_session_cleanup_hook(Session) end, Tuples), %% We don't need to replicate deletes %% We remove the local content here diff --git a/src/mongoose_collector.erl b/src/mongoose_collector.erl index bf3fc7c05bb..ebc356153ff 100644 --- a/src/mongoose_collector.erl +++ b/src/mongoose_collector.erl @@ -4,7 +4,7 @@ %% gen_server callbacks -behaviour(gen_server). --export([start_common/3]). +-export([start_common/3, stop_common/2]). -export([start_link/2, init/1, handle_call/3, handle_cast/2, handle_info/2]). -ignore_xref([start_link/2]). @@ -24,6 +24,10 @@ start_common(Module, HostType, WOpts) -> ChildSpec = {Name, MFA, permanent, 5000, worker, [Module, mongoose_collector]}, ejabberd_sup:start_child(ChildSpec). +stop_common(Module, HostType) -> + Name = gen_mod:get_module_proc(HostType, Module), + ejabberd_sup:stop_child(Name). + start_link(Name, Opts) -> gen_server:start_link({local, Name}, ?MODULE, Opts, []). diff --git a/src/stream_management/mod_stream_management.erl b/src/stream_management/mod_stream_management.erl index 868f53dbf48..c92a973ddc6 100644 --- a/src/stream_management/mod_stream_management.erl +++ b/src/stream_management/mod_stream_management.erl @@ -79,8 +79,9 @@ start(HostType, Opts) -> ?LOG_INFO(#{what => stream_management_starting}). -spec stop(mongooseim:host_type()) -> ok. -stop(_HostType) -> +stop(HostType) -> ?LOG_INFO(#{what => stream_management_stopping}), + mod_stream_management_backend:stop(HostType), ok. 
-spec hooks(mongooseim:host_type()) -> gen_hook:hook_list(). diff --git a/src/stream_management/mod_stream_management_backend.erl b/src/stream_management/mod_stream_management_backend.erl index d7c0b95692d..c9f37381278 100644 --- a/src/stream_management/mod_stream_management_backend.erl +++ b/src/stream_management/mod_stream_management_backend.erl @@ -1,5 +1,6 @@ -module(mod_stream_management_backend). -export([init/2, + stop/1, register_smid/3, unregister_smid/2, get_sid/2]). @@ -18,6 +19,9 @@ HostType :: mongooseim:host_type(), Opts :: gen_mod:module_opts(). +-callback stop(HostType) -> ok when + HostType :: mongooseim:host_type(). + -callback register_smid(HostType, SMID, SID) -> ok | {error, term()} when HostType :: mongooseim:host_type(), @@ -58,6 +62,11 @@ init(HostType, Opts) -> Args = [HostType, Opts], mongoose_backend:call(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec stop(HostType :: mongooseim:host_type()) -> ok. +stop(HostType) -> + Args = [HostType], + mongoose_backend:call(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + -spec register_smid(HostType, SMID, SID) -> ok | {error, term()} when HostType :: mongooseim:host_type(), diff --git a/src/stream_management/mod_stream_management_cets.erl b/src/stream_management/mod_stream_management_cets.erl index c13bd93b5ff..058fcd84db5 100644 --- a/src/stream_management/mod_stream_management_cets.erl +++ b/src/stream_management/mod_stream_management_cets.erl @@ -6,6 +6,7 @@ -include_lib("stdlib/include/ms_transform.hrl"). -export([init/2, + stop/1, register_smid/3, unregister_smid/2, get_sid/2]). @@ -27,6 +28,9 @@ init(HostType, #{stale_h := StaleOpts}) -> maybe_init_stale_h(HostType, StaleOpts), ok. +stop(HostType) -> + stop_cleaner(HostType). 
+ maybe_init_stale_h(HostType, StaleOpts = #{enabled := true}) -> cets:start(?TABLE_H, #{}), cets_discovery:add_table(mongoose_cets_discovery, ?TABLE_H), @@ -97,6 +101,9 @@ start_cleaner(HostType, #{repeat_after := Interval, geriatric := TTL}) -> opts => TTL, interval => timer:seconds(Interval)}, mongoose_collector:start_common(?MODULE, HostType, WOpts). +stop_cleaner(HostType) -> + mongoose_collector:stop_common(?MODULE, HostType). + clear_table(_HostType, GeriatricAge) -> TimeToDie = erlang:monotonic_time(second) - GeriatricAge, MS = ets:fun2ms(fun({_, _, S}) when S < TimeToDie -> true end), diff --git a/src/stream_management/mod_stream_management_mnesia.erl b/src/stream_management/mod_stream_management_mnesia.erl index 143b578a02d..c455fd01a67 100644 --- a/src/stream_management/mod_stream_management_mnesia.erl +++ b/src/stream_management/mod_stream_management_mnesia.erl @@ -5,6 +5,7 @@ -include_lib("stdlib/include/ms_transform.hrl"). -export([init/2, + stop/1, register_smid/3, unregister_smid/2, get_sid/2]). @@ -33,6 +34,9 @@ init(HostType, #{stale_h := StaleOpts}) -> maybe_init_stale_h(HostType, StaleOpts), ok. +stop(HostType) -> + stop_cleaner(HostType). + maybe_init_stale_h(HostType, StaleOpts = #{enabled := true}) -> ?LOG_INFO(#{what => stream_mgmt_stale_h_start}), mnesia:create_table(stream_mgmt_stale_h, @@ -121,6 +125,9 @@ start_cleaner(HostType, #{repeat_after := Interval, geriatric := TTL}) -> opts => TTL, interval => timer:seconds(Interval)}, mongoose_collector:start_common(?MODULE, HostType, WOpts). +stop_cleaner(HostType) -> + mongoose_collector:stop_common(?MODULE, HostType). 
+ clear_table(_HostType, GeriatricAge) -> TimeToDie = erlang:monotonic_time(second) - GeriatricAge, MS = ets:fun2ms(fun(#stream_mgmt_stale_h{stamp = S}) when S < TimeToDie -> true end), From a360ccde8bd205334dc3ebd2465e3baf2a134d54 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 30 Mar 2023 20:21:33 +0200 Subject: [PATCH 011/161] Upgrade cets with dialyzer fix --- rebar.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rebar.lock b/rebar.lock index c010210f6b3..12f14278b9d 100644 --- a/rebar.lock +++ b/rebar.lock @@ -8,7 +8,7 @@ {<<"certifi">>,{pkg,<<"certifi">>,<<"2.9.0">>},1}, {<<"cets">>, {git,"https://github.com/esl/cets.git", - {ref,"d90265d5ce06054e58a588c74df0347bab03c5c2"}}, + {ref,"351221c7a2f2c64f7ebc163428f8d340b71705ac"}}, 0}, {<<"cowboy">>,{pkg,<<"cowboy">>,<<"2.9.0">>},0}, {<<"cowboy_swagger">>,{pkg,<<"cowboy_swagger">>,<<"2.5.1">>},0}, From 3a34358f39776439a5554aa00b3b4a57268a007e Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 31 Mar 2023 08:52:26 +0200 Subject: [PATCH 012/161] Remove cets_tables_are_in_mnesia_info testcase --- big_tests/tests/mongooseimctl_SUITE.erl | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/big_tests/tests/mongooseimctl_SUITE.erl b/big_tests/tests/mongooseimctl_SUITE.erl index e59671e92d0..b4a57679825 100644 --- a/big_tests/tests/mongooseimctl_SUITE.erl +++ b/big_tests/tests/mongooseimctl_SUITE.erl @@ -136,8 +136,7 @@ basic() -> dump_table, get_loglevel, remove_old_messages_test, - remove_expired_messages_test, - cets_tables_are_in_mnesia_info]. + remove_expired_messages_test]. accounts() -> [change_password, check_password_hash, check_password, check_account, ban_account, num_active_users, delete_old_users, @@ -312,13 +311,6 @@ end_per_group(_GroupName, Config) -> get_registered_users() -> rpc(mim(), ejabberd_auth, get_vh_registered_users, [domain()]). 
-init_per_testcase(CaseName = cets_tables_are_in_mnesia_info, Config) -> - case rpc(mim(), ejabberd_sm, sm_backend, []) of - ejabberd_sm_cets -> - escalus:init_per_testcase(CaseName, Config); - _ -> - {skip, "Only for cets preset"} - end; init_per_testcase(CaseName, Config) when CaseName == delete_old_users_vhost orelse CaseName == stats_global @@ -1438,12 +1430,6 @@ remove_expired_messages_test(Config) -> 2 = length(SecondList) end). -cets_tables_are_in_mnesia_info(Config) -> - {Out, 0} = mongooseimctl("mnesia", ["info"], Config), - Lines = binary:split(iolist_to_binary(Out), <<"\n">>, [global]), - [_Line] = [L || <<"table=cets_session", _/binary>> = L <- Lines], - ok. - %%----------------------------------------------------------------- %% Helpers %%----------------------------------------------------------------- From e71c3291a7c3914673c8bcf4992b203f26a24e4a Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 31 Mar 2023 09:03:09 +0200 Subject: [PATCH 013/161] Expect two or three nodes in graphql_metric_SUITE:get_cluster_metrics_by_nonexistent_name --- big_tests/tests/graphql_metric_SUITE.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/big_tests/tests/graphql_metric_SUITE.erl b/big_tests/tests/graphql_metric_SUITE.erl index 3326dfaac7e..b50ff43fa8d 100644 --- a/big_tests/tests/graphql_metric_SUITE.erl +++ b/big_tests/tests/graphql_metric_SUITE.erl @@ -287,13 +287,13 @@ get_cluster_metrics_by_nonexistent_name(Config) -> Result = get_cluster_metrics_as_dicts_by_name([<<"nonexistent">>], Config), ParsedResult = get_ok_value([data, metric, getClusterMetricsAsDicts], Result), [#{<<"node">> := _, <<"result">> := []}, - #{<<"node">> := _, <<"result">> := []}] = ParsedResult. + #{<<"node">> := _, <<"result">> := []}|_] = ParsedResult. %% two or three nodes. 
get_cluster_metrics_with_nonexistent_key(Config) -> Result = get_cluster_metrics_as_dicts_with_keys([<<"nonexistent">>], Config), ParsedResult = get_ok_value([data, metric, getClusterMetricsAsDicts], Result), [#{<<"node">> := _, <<"result">> := [_|_]}, - #{<<"node">> := _, <<"result">> := [_|_]}] = ParsedResult. + #{<<"node">> := _, <<"result">> := [_|_]}|_] = ParsedResult. get_cluster_metrics_empty_args(Config) -> Node = atom_to_binary(maps:get(node, distributed_helper:mim2())), From 7c522efac8ffeadf6b87d5ad59805e01bd6006bc Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 31 Mar 2023 10:12:54 +0200 Subject: [PATCH 014/161] Fix error formatting in rest_helper --- big_tests/tests/rest_helper.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/big_tests/tests/rest_helper.erl b/big_tests/tests/rest_helper.erl index c98a09e87b6..b724c9f1111 100644 --- a/big_tests/tests/rest_helper.erl +++ b/big_tests/tests/rest_helper.erl @@ -36,7 +36,7 @@ assert_inlist(Pattern, L) -> Fl = lists:filter(fun(X) -> case X of Pattern -> true; _ -> false end end, L), case Fl of [] -> - ct:fail(io_lib:format("Fail: ~p not in [~p...]", [Pattern, H])); + ct:fail("Fail: ~p not in [~p...]", [Pattern, H]); _ -> Fl end. @@ -49,13 +49,13 @@ assert_notinlist(Pattern, L) -> [] -> ok; _ -> - ct:fail(io_lib:format("Fail: ~p in ~p", [Pattern, L])) + ct:fail("Fail: ~p in ~p", [Pattern, L]) end. 
assert_inmaplist([], Map, L, [H|_]) -> case L of [] -> - ct:fail(io_lib:format("Fail: ~p not in [~p...]", [Map, H])); + ct:fail("Fail: ~p not in [~p...]", [Map, H]); _ -> L end; @@ -70,7 +70,7 @@ assert_notinmaplist([], Map, L, [H|_]) -> [] -> ok; _ -> - ct:fail(io_lib:format("Fail: ~p in [~p...]", [Map, H])) + ct:fail("Fail: ~p in [~p...]", [Map, H]) end; assert_notinmaplist([K|Keys], Map, L, Orig) -> V = maps:get(K, Map), From 09d95f0eba59a18f6675c55b68d92e9934d0f88f Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 25 Apr 2023 08:53:57 +0200 Subject: [PATCH 015/161] Add GraphQL handler for CETS info --- big_tests/default.spec | 1 + big_tests/dynamic_domains.spec | 1 + big_tests/tests/graphql_cets_SUITE.erl | 77 +++++++++++++++++++ priv/graphql/schemas/admin/admin_schema.gql | 2 + priv/graphql/schemas/admin/cets.gql | 17 ++++ .../admin/mongoose_graphql_admin_query.erl | 2 + .../mongoose_graphql_cets_admin_query.erl | 23 ++++++ src/graphql/mongoose_graphql.erl | 1 + 8 files changed, 124 insertions(+) create mode 100644 big_tests/tests/graphql_cets_SUITE.erl create mode 100644 priv/graphql/schemas/admin/cets.gql create mode 100644 src/graphql/admin/mongoose_graphql_cets_admin_query.erl diff --git a/big_tests/default.spec b/big_tests/default.spec index 561599d3f76..09686a6f466 100644 --- a/big_tests/default.spec +++ b/big_tests/default.spec @@ -45,6 +45,7 @@ {suites, "tests", graphql_gdpr_SUITE}. {suites, "tests", graphql_token_SUITE}. {suites, "tests", graphql_mnesia_SUITE}. +{suites, "tests", graphql_cets_SUITE}. {suites, "tests", graphql_vcard_SUITE}. {suites, "tests", graphql_http_upload_SUITE}. {suites, "tests", graphql_server_SUITE}. diff --git a/big_tests/dynamic_domains.spec b/big_tests/dynamic_domains.spec index 73a309663c4..3710a19c6e5 100644 --- a/big_tests/dynamic_domains.spec +++ b/big_tests/dynamic_domains.spec @@ -63,6 +63,7 @@ {suites, "tests", graphql_gdpr_SUITE}. {suites, "tests", graphql_token_SUITE}. 
{suites, "tests", graphql_mnesia_SUITE}. +{suites, "tests", graphql_cets_SUITE}. {suites, "tests", graphql_http_upload_SUITE}. {suites, "tests", graphql_server_SUITE}. {suites, "tests", graphql_metric_SUITE}. diff --git a/big_tests/tests/graphql_cets_SUITE.erl b/big_tests/tests/graphql_cets_SUITE.erl new file mode 100644 index 00000000000..63302ed29cc --- /dev/null +++ b/big_tests/tests/graphql_cets_SUITE.erl @@ -0,0 +1,77 @@ +-module(graphql_cets_SUITE). +-include_lib("eunit/include/eunit.hrl"). + +-compile([export_all, nowarn_export_all]). + +-import(distributed_helper, [mim/0]). +-import(domain_helper, [host_type/1]). +-import(mongooseimctl_helper, [rpc_call/3]). +-import(graphql_helper, [execute_command/4, get_unauthorized/1, get_ok_value/2]). + +all() -> + [{group, admin_cets_cli}, + {group, admin_cets_http}, + {group, domain_admin_cets}]. + +groups() -> + [{admin_cets_http, [sequence], admin_cets_tests()}, + {admin_cets_cli, [sequence], admin_cets_tests()}, + {domain_admin_cets, [], domain_admin_tests()}]. + +admin_cets_tests() -> + [has_sm_table_in_info]. + +domain_admin_tests() -> + [domain_admin_get_info_test]. + +init_per_suite(Config) -> + Config1 = escalus:init_per_suite(Config), + ejabberd_node_utils:init(mim(), Config1). + +end_per_suite(Config) -> + escalus:end_per_suite(Config). + +init_per_group(admin_cets_http, Config) -> + graphql_helper:init_admin_handler(Config); +init_per_group(admin_cets_cli, Config) -> + graphql_helper:init_admin_cli(Config); +init_per_group(domain_admin_cets, Config) -> + graphql_helper:init_domain_admin_handler(Config). + +end_per_group(_, _Config) -> + graphql_helper:clean(), + escalus_fresh:clean(). + +init_per_testcase(has_sm_table_in_info, Config) -> + case rpc_call(ejabberd_sm, sm_backend, []) of + ejabberd_sm_cets -> + Config; + _ -> + {skip, "SM backend is not CETS"} + end; +init_per_testcase(_, Config) -> + Config. 
+ +% Admin tests + +has_sm_table_in_info(Config) -> + Res = get_info(Config), + Tables = get_ok_value([data, cets, systemInfo], Res), + [T] = [T || T = #{<<"tableName">> := <<"cets_session">>} <- Tables], + #{<<"memory">> := Mem, <<"nodes">> := Nodes, <<"size">> := Size} = T, + true = is_integer(Mem), + true = is_integer(Size), + #{node := Node1} = mim(), + lists:member(Node1, Nodes). + +% Domain admin tests + +domain_admin_get_info_test(Config) -> + get_unauthorized(get_info(Config)). + +%-------------------------------------------------------------------------------------------------- +% Helpers +%-------------------------------------------------------------------------------------------------- + +get_info(Config) -> + execute_command(<<"cets">>, <<"systemInfo">>, #{}, Config). diff --git a/priv/graphql/schemas/admin/admin_schema.gql b/priv/graphql/schemas/admin/admin_schema.gql index 6f55d867275..59520901e49 100644 --- a/priv/graphql/schemas/admin/admin_schema.gql +++ b/priv/graphql/schemas/admin/admin_schema.gql @@ -39,6 +39,8 @@ type AdminQuery{ gdpr: GdprAdminQuery "Mnesia internal database management" mnesia: MnesiaAdminQuery + "CETS internal database management" + cets: CETSAdminQuery "Server info and management" server: ServerAdminQuery } diff --git a/priv/graphql/schemas/admin/cets.gql b/priv/graphql/schemas/admin/cets.gql new file mode 100644 index 00000000000..f9a8f8e74e4 --- /dev/null +++ b/priv/graphql/schemas/admin/cets.gql @@ -0,0 +1,17 @@ +"Allow admin to get information about CETS status" +type CETSAdminQuery @protected{ + "Get from the local node. 
Only for global admin" + systemInfo: [CETSInfo] + @protected(type: GLOBAL) +} + +type CETSInfo { + "ETS table name" + tableName: String + "Memory (in words)" + memory: Int + "Size (in records)" + size: Int + "A list of clustered nodes" + nodes: [String] +} diff --git a/src/graphql/admin/mongoose_graphql_admin_query.erl b/src/graphql/admin/mongoose_graphql_admin_query.erl index faf446b2a2f..6a8eae5d231 100644 --- a/src/graphql/admin/mongoose_graphql_admin_query.erl +++ b/src/graphql/admin/mongoose_graphql_admin_query.erl @@ -21,6 +21,8 @@ execute(_Ctx, _Obj, <<"metric">>, _Args) -> {ok, metric}; execute(_Ctx, _Obj, <<"mnesia">>, _Args) -> {ok, mnesia}; +execute(_Ctx, _Obj, <<"cets">>, _Args) -> + {ok, cets}; execute(_Ctx, _Obj, <<"muc">>, _Args) -> {ok, muc}; execute(_Ctx, _Obj, <<"muc_light">>, _Args) -> diff --git a/src/graphql/admin/mongoose_graphql_cets_admin_query.erl b/src/graphql/admin/mongoose_graphql_cets_admin_query.erl new file mode 100644 index 00000000000..394dbcc4c28 --- /dev/null +++ b/src/graphql/admin/mongoose_graphql_cets_admin_query.erl @@ -0,0 +1,23 @@ +-module(mongoose_graphql_cets_admin_query). +-behaviour(mongoose_graphql). + +-export([execute/4]). + +-import(mongoose_graphql_helper, [make_error/2]). + +-ignore_xref([execute/4]). + +-include("../mongoose_graphql_types.hrl"). + +execute(Ctx, cets, <<"systemInfo">>, _) -> + try cets_discovery:info(mongoose_cets_discovery) of + Tables -> + {ok, lists:map(fun process_result/1, Tables)} + catch _Class:Reason -> + make_error({Reason, <<"Failed to get CETS tables info">>}, Ctx) + end. + +process_result(#{memory := Memory, size := Size, nodes := Nodes, table := Tab}) -> + Nodes2 = [{ok, Node} || Node <- Nodes], + {ok, #{<<"memory">> => Memory, <<"size">> => Size, + <<"nodes">> => Nodes2, <<"tableName">> => Tab}}. 
diff --git a/src/graphql/mongoose_graphql.erl b/src/graphql/mongoose_graphql.erl index 6eaf02d69c9..cdff5cdd5ed 100644 --- a/src/graphql/mongoose_graphql.erl +++ b/src/graphql/mongoose_graphql.erl @@ -194,6 +194,7 @@ admin_mapping_rules() -> 'MUCLightAdminQuery' => mongoose_graphql_muc_light_admin_query, 'MnesiaAdminMutation' => mongoose_graphql_mnesia_admin_mutation, 'MnesiaAdminQuery' => mongoose_graphql_mnesia_admin_query, + 'CETSAdminQuery' => mongoose_graphql_cets_admin_query, 'OfflineAdminMutation' => mongoose_graphql_offline_admin_mutation, 'PrivateAdminMutation' => mongoose_graphql_private_admin_mutation, 'PrivateAdminQuery' => mongoose_graphql_private_admin_query, From 588f4c4eae27bbe0ae64f1b2ae84260ea19aac24 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 28 Apr 2023 09:17:46 +0200 Subject: [PATCH 016/161] Patch ejabberd_sm_cets to work with new SM API --- src/ejabberd_sm_cets.erl | 32 ++++++++------------------------ 1 file changed, 8 insertions(+), 24 deletions(-) diff --git a/src/ejabberd_sm_cets.erl b/src/ejabberd_sm_cets.erl index c5b51106c27..d9b31d523e7 100644 --- a/src/ejabberd_sm_cets.erl +++ b/src/ejabberd_sm_cets.erl @@ -10,8 +10,7 @@ get_sessions/1, get_sessions/2, get_sessions/3, - create_session/4, - update_session/4, + set_session/4, delete_session/4, cleanup/1, total_count/0, @@ -57,29 +56,14 @@ get_sessions(User, Server, Resource) -> %% We can detect duplicates on the merging step or on reading (or both). tuples_to_sessions(Xs). --spec create_session(User :: jid:luser(), - Server :: jid:lserver(), - Resource :: jid:lresource(), - Session :: ejabberd_sm:session()) -> ok | {error, term()}. 
-create_session(User, Server, Resource, Session) -> - case get_sessions(User, Server, Resource) of - [] -> - cets:insert(?TABLE, session_to_tuple(Session)); - Sessions when is_list(Sessions) -> - %% TODO merge_info function would be removed, once MIM-1875 is done - MergedSession = mongoose_session:merge_info - (Session, hd(lists:sort(Sessions))), - cets:insert(?TABLE, session_to_tuple(MergedSession)) - end. - --spec update_session(User :: jid:luser(), - Server :: jid:lserver(), - Resource :: jid:lresource(), - Session :: ejabberd_sm:session()) -> ok | {error, term()}. -update_session(_User, _Server, _Resource, Session) -> +-spec set_session(User :: jid:luser(), + Server :: jid:lserver(), + Resource :: jid:lresource(), + Session :: ejabberd_sm:session()) -> ok | {error, term()}. +set_session(_User, _Server, _Resource, Session) -> cets:insert(?TABLE, session_to_tuple(Session)). --spec delete_session(ejabberd_sm:sid(), +-spec delete_session(SID :: ejabberd_sm:sid(), User :: jid:luser(), Server :: jid:lserver(), Resource :: jid:lresource()) -> ok. @@ -95,7 +79,7 @@ cleanup(Node) -> cets:sync(?TABLE), %% This is a full table scan, but cleanup is rare. 
Tuples = ets:select(?TABLE, [{R, [Guard], ['$_']}]), - lists:foreach(fun({Key, _, _} = Tuple) -> + lists:foreach(fun({_Key, _, _} = Tuple) -> Session = tuple_to_session(Tuple), ejabberd_sm:run_session_cleanup_hook(Session) end, Tuples), From 2a880807ba698894fc5989d5b28d95c39b183c5a Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 10 May 2023 19:32:19 +0200 Subject: [PATCH 017/161] Use pgsql in cets preset --- .circleci/template.yml | 8 ++++---- big_tests/test.config | 22 +++++++++++++++++++--- tools/gh-actions-configure-preset.sh | 2 ++ 3 files changed, 25 insertions(+), 7 deletions(-) diff --git a/.circleci/template.yml b/.circleci/template.yml index 39537a18724..1d07693d9da 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -591,7 +591,7 @@ jobs: preset: type: enum enum: [internal_mnesia, mysql_redis, odbc_mssql_mnesia, ldap_mnesia, - elasticsearch_and_cassandra_mnesia, pgsql_mnesia, internal_cets] + elasticsearch_and_cassandra_mnesia, pgsql_mnesia, pgsql_cets] description: Preset to run default: internal_mnesia db: @@ -826,11 +826,11 @@ workflows: - otp_25_docker filters: *all_tags - big_tests_in_docker: - name: internal_cets_25 + name: pgsql_cets_25 executor: otp_25_redis context: mongooseim-org - preset: internal_cets - db: "mnesia cets" + preset: pgsql_cets + db: "mnesia postgres cets" requires: - otp_25_docker filters: *all_tags diff --git a/big_tests/test.config b/big_tests/test.config index 377df5020e4..adaeb9698d0 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -237,13 +237,29 @@ {outgoing_pools, "[outgoing_pools.redis.global_distrib] scope = \"global\" workers = 10"}]}, - {internal_cets, - [{dbs, [redis]}, + {pgsql_cets, + [{dbs, [redis, pgsql]}, {sm_backend, "\"cets\""}, {stream_management_backend, cets}, + {auth_method, "rdbms"}, {outgoing_pools, "[outgoing_pools.redis.global_distrib] scope = \"global\" - workers = 10"}]}, + workers = 10 +[outgoing_pools.rdbms.default] + scope = \"global\" + workers = 5 + 
connection.driver = \"pgsql\" + connection.host = \"localhost\" + connection.database = \"ejabberd\" + connection.username = \"ejabberd\" + connection.password = \"mongooseim_secret\" + connection.tls.required = true + connection.tls.cacertfile = \"priv/ssl/cacert.pem\" + connection.tls.server_name_indication.enabled = false"}, + {service_domain_db, ""}, + {mod_vcard, " backend = \"rdbms\" + host = \"vjud.@HOST@\"\n"}, + {mod_roster, " backend = \"rdbms\"\n"}]}, {pgsql_mnesia, [{dbs, [redis, pgsql]}, {auth_method, "rdbms"}, diff --git a/tools/gh-actions-configure-preset.sh b/tools/gh-actions-configure-preset.sh index 9096c5f9c08..5b1f1201e25 100755 --- a/tools/gh-actions-configure-preset.sh +++ b/tools/gh-actions-configure-preset.sh @@ -36,6 +36,8 @@ case "$PRESET" in export REL_CONFIG="with-mysql with-redis with-amqp_client" ;; pgsql_mnesia) export REL_CONFIG="with-pgsql" ;; + cets_mnesia) + export REL_CONFIG="with-pgsql" ;; riak_mnesia) export REL_CONFIG="with-riak" ;; ldap_mnesia) From 205d9977c4a02eb750c650d103ddf0e3504a3c12 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 10 May 2023 21:58:15 +0200 Subject: [PATCH 018/161] Add CETS disco backend with pgsql support --- priv/pg.sql | 6 +++ src/ejabberd_sup.erl | 4 +- src/mongoose_cets_discovery_rdbms.erl | 54 +++++++++++++++++++++++++++ 3 files changed, 63 insertions(+), 1 deletion(-) create mode 100644 src/mongoose_cets_discovery_rdbms.erl diff --git a/priv/pg.sql b/priv/pg.sql index d0525f57c84..ffd43591133 100644 --- a/priv/pg.sql +++ b/priv/pg.sql @@ -504,3 +504,9 @@ CREATE TABLE domain_events ( PRIMARY KEY(id) ); CREATE INDEX i_domain_events_domain ON domain_events(domain); + +CREATE TABLE discovery_nodes ( + node_name varchar(250), + updated_timestamp BIGINT NOT NULL, -- in microseconds + PRIMARY KEY (node_name) +); diff --git a/src/ejabberd_sup.erl b/src/ejabberd_sup.erl index 5b1cb95812d..c12b3e573c6 100644 --- a/src/ejabberd_sup.erl +++ b/src/ejabberd_sup.erl @@ -155,7 +155,9 @@ init([]) -> 
permanent, infinity, supervisor, [pg]}, ConfigDir = filename:dirname(mongoose_config:get_config_path()), DiscoFile = filename:join(ConfigDir, "cets_disco.txt"), - DiscoOpts = #{name => mongoose_cets_discovery, disco_file => DiscoFile}, + DiscoOpts = #{ + backend_module => mongoose_cets_discovery_rdbms, + name => mongoose_cets_discovery, disco_file => DiscoFile}, CetsDisco = {cets_discovery, {cets_discovery, start_link, [DiscoOpts]}, diff --git a/src/mongoose_cets_discovery_rdbms.erl b/src/mongoose_cets_discovery_rdbms.erl new file mode 100644 index 00000000000..436c28c395e --- /dev/null +++ b/src/mongoose_cets_discovery_rdbms.erl @@ -0,0 +1,54 @@ +%% @doc MongooseIM RDBMS backend for cets_discovery. +-module(mongoose_cets_discovery_rdbms). +-behaviour(cets_discovery). +-export([init/1, get_nodes/1]). + +-include_lib("kernel/include/logger.hrl"). + +-type opts() :: #{}. +-type state() :: opts(). + +-spec init(opts()) -> state(). +init(Opts) -> + Opts. + +-spec get_nodes(state()) -> {cets_discovery:get_nodes_result(), state()}. +get_nodes(State = #{}) -> + prepare(), + insert(), + try mongoose_rdbms:execute_successfully(global, cets_disco_select, []) of + {selected, Rows} -> + Nodes = [binary_to_atom(X, latin1) || {X} <- Rows, X =/= <<>>], + {{ok, Nodes}, State} + catch Class:Reason:Stacktrace -> + ?LOG_ERROR(#{ + what => discovery_failed_select, + class => Class, + reason => Reason, + stacktrace => Stacktrace + }), + {{error, Reason}, State} + end. + +prepare() -> + Filter = [<<"node_name">>], + Fields = [<<"updated_timestamp">>], + rdbms_queries:prepare_upsert(global, cets_disco_insert, discovery_nodes, + Filter ++ Fields, Fields, Filter), + mongoose_rdbms:prepare(cets_disco_select, discovery_nodes, [node_name], + <<"SELECT node_name FROM discovery_nodes">>). 
+ +insert() -> + Node = atom_to_binary(node(), latin1), + Timestamp = os:system_time(microsecond), + try + {updated, _} = rdbms_queries:execute_upsert(global, cets_disco_insert, + [Node, Timestamp], [Timestamp], [Node]) + catch Class:Reason:Stacktrace -> + ?LOG_ERROR(#{ + what => discovery_failed_insert, + class => Class, + reason => Reason, + stacktrace => Stacktrace + }) + end. From 74b679ea4786fddffb1679cf3da7409f41cb5b59 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 15 May 2023 16:02:14 +0200 Subject: [PATCH 019/161] Allow to configure cluster_name --- big_tests/test.config | 2 ++ priv/pg.sql | 1 + rel/fed1.vars-toml.config | 1 + rel/files/mongooseim.toml | 2 ++ rel/mim1.vars-toml.config | 1 + rel/mim2.vars-toml.config | 1 + rel/mim3.vars-toml.config | 1 + rel/reg1.vars-toml.config | 2 ++ src/config/mongoose_config_spec.erl | 33 +++++++++++++++++++++++++++ src/ejabberd_sup.erl | 11 +++++++-- src/mongoose_cets_discovery_rdbms.erl | 21 +++++++++-------- 11 files changed, 65 insertions(+), 11 deletions(-) diff --git a/big_tests/test.config b/big_tests/test.config index adaeb9698d0..3ea5695ba62 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -242,6 +242,8 @@ {sm_backend, "\"cets\""}, {stream_management_backend, cets}, {auth_method, "rdbms"}, + {internal_databases, "[internal_databases.cets] + cluster_name = \"{{cluster_name}}\""}, {outgoing_pools, "[outgoing_pools.redis.global_distrib] scope = \"global\" workers = 10 diff --git a/priv/pg.sql b/priv/pg.sql index ffd43591133..1541cbec529 100644 --- a/priv/pg.sql +++ b/priv/pg.sql @@ -507,6 +507,7 @@ CREATE INDEX i_domain_events_domain ON domain_events(domain); CREATE TABLE discovery_nodes ( node_name varchar(250), + cluster_name varchar(250), updated_timestamp BIGINT NOT NULL, -- in microseconds PRIMARY KEY (node_name) ); diff --git a/rel/fed1.vars-toml.config b/rel/fed1.vars-toml.config index b6e34e8d401..34c4b2420f3 100644 --- a/rel/fed1.vars-toml.config +++ b/rel/fed1.vars-toml.config 
@@ -17,6 +17,7 @@ %% "localhost" host should NOT be defined. {hosts, "\"fed1\""}. {default_server_domain, "\"fed1\""}. +{cluster_name, "fed"}. %% domain.example.com is for multitenancy preset, muc_SUITE:register_over_s2s {s2s_addr, "[[s2s.address]] diff --git a/rel/files/mongooseim.toml b/rel/files/mongooseim.toml index eb6bc696879..fea8ca43c63 100644 --- a/rel/files/mongooseim.toml +++ b/rel/files/mongooseim.toml @@ -162,6 +162,8 @@ {{{auth_method_opts}}}{{/auth_method_opts}} {{/auth_method}} +{{{internal_databases}}} + {{#outgoing_pools}} {{{outgoing_pools}}} {{/outgoing_pools}} diff --git a/rel/mim1.vars-toml.config b/rel/mim1.vars-toml.config index 5fcea75ba4c..694052dcf66 100644 --- a/rel/mim1.vars-toml.config +++ b/rel/mim1.vars-toml.config @@ -20,6 +20,7 @@ {hosts, "\"localhost\", \"anonymous.localhost\", \"localhost.bis\""}. {host_types, "\"test type\", \"dummy auth\", \"anonymous\""}. {default_server_domain, "\"localhost\""}. +{cluster_name, "mim_main"}. {mod_amp, ""}. {host_config, diff --git a/rel/mim2.vars-toml.config b/rel/mim2.vars-toml.config index 2581a209a45..de866e27dd4 100644 --- a/rel/mim2.vars-toml.config +++ b/rel/mim2.vars-toml.config @@ -18,6 +18,7 @@ {hosts, "\"localhost\", \"anonymous.localhost\", \"localhost.bis\""}. {host_types, "\"test type\", \"dummy auth\""}. {default_server_domain, "\"localhost\""}. +{cluster_name, "mim_main"}. {s2s_addr, "[[s2s.address]] host = \"localhost2\" ip_address = \"127.0.0.1\""}. diff --git a/rel/mim3.vars-toml.config b/rel/mim3.vars-toml.config index 645ea41e1cc..ac6574072fb 100644 --- a/rel/mim3.vars-toml.config +++ b/rel/mim3.vars-toml.config @@ -20,6 +20,7 @@ {hosts, "\"localhost\", \"anonymous.localhost\", \"localhost.bis\""}. {default_server_domain, "\"localhost\""}. +{cluster_name, "mim_main"}. 
{s2s_addr, "[[s2s.address]] host = \"localhost2\" diff --git a/rel/reg1.vars-toml.config b/rel/reg1.vars-toml.config index 4b5a4e4fea7..da485e138c5 100644 --- a/rel/reg1.vars-toml.config +++ b/rel/reg1.vars-toml.config @@ -21,6 +21,8 @@ %% "reg1" is a local host. {hosts, "\"reg1\", \"localhost\""}. {default_server_domain, "\"reg1\""}. +{cluster_name, "reg"}. + {s2s_addr, "[[s2s.address]] host = \"localhost\" ip_address = \"127.0.0.1\" diff --git a/src/config/mongoose_config_spec.erl b/src/config/mongoose_config_spec.erl index 1ed67738942..e9f2b414662 100644 --- a/src/config/mongoose_config_spec.erl +++ b/src/config/mongoose_config_spec.erl @@ -91,6 +91,7 @@ root() -> <<"listen">> => Listen#section{include = always}, <<"auth">> => Auth#section{include = always}, <<"outgoing_pools">> => outgoing_pools(), + <<"internal_databases">> => internal_databases(), <<"services">> => services(), <<"modules">> => Modules#section{include = always}, <<"shaper">> => shaper(), @@ -425,6 +426,38 @@ auth_password() -> include = always }. +%% path: internal_databases +internal_databases() -> + Items = #{<<"cets">> => internal_database_cets(), + <<"mnesia">> => internal_database_mnesia()}, + #section{items = Items, + format_items = map, + wrap = global_config, + include = always}. + +%% path: internal_databases.*.* +internal_database_cets() -> + #section{ + items = #{<<"backend">> => #option{type = atom, + validate = {enum, [file, rdbms]}}, + <<"cluster_name">> => #option{type = atom, validate = non_empty}, + %% Relative to the config directory (or an absolute name) + <<"nodelist_file">> => #option{type = string, + validate = filename} + }, + defaults = #{<<"backend">> => rdbms, <<"cluster_name">> => mongooseim, + <<"nodelist_file">> => "cets_disco.txt"}, + include = always + }. + +%% path: internal_databases.*.* +internal_database_mnesia() -> + #section{ + items = #{}, + defaults = #{}, + include = always + }. 
+ %% path: outgoing_pools outgoing_pools() -> PoolTypes = [<<"cassandra">>, <<"elastic">>, <<"http">>, <<"ldap">>, diff --git a/src/ejabberd_sup.erl b/src/ejabberd_sup.erl index c12b3e573c6..5e9efbfe0d7 100644 --- a/src/ejabberd_sup.erl +++ b/src/ejabberd_sup.erl @@ -154,9 +154,13 @@ init([]) -> {pg, start_link, [mim_scope]}, permanent, infinity, supervisor, [pg]}, ConfigDir = filename:dirname(mongoose_config:get_config_path()), - DiscoFile = filename:join(ConfigDir, "cets_disco.txt"), + #{backend := DiscoBackend, cluster_name := ClusterName, + nodelist_file := NodeFile} = + mongoose_config:get_opt([internal_databases, cets]), + DiscoFile = filename:join(ConfigDir, NodeFile), DiscoOpts = #{ - backend_module => mongoose_cets_discovery_rdbms, + backend_module => disco_backend_to_module(DiscoBackend), + cluster_name => atom_to_binary(ClusterName), name => mongoose_cets_discovery, disco_file => DiscoFile}, CetsDisco = {cets_discovery, @@ -197,3 +201,6 @@ stop_child(Proc) -> supervisor:terminate_child(ejabberd_sup, Proc), supervisor:delete_child(ejabberd_sup, Proc), ok. + +disco_backend_to_module(rdbms) -> mongoose_cets_discovery_rdbms; +disco_backend_to_module(file) -> cets_discovery_file. diff --git a/src/mongoose_cets_discovery_rdbms.erl b/src/mongoose_cets_discovery_rdbms.erl index 436c28c395e..e18e3c8bd9e 100644 --- a/src/mongoose_cets_discovery_rdbms.erl +++ b/src/mongoose_cets_discovery_rdbms.erl @@ -9,14 +9,14 @@ -type state() :: opts(). -spec init(opts()) -> state(). -init(Opts) -> +init(Opts = #{cluster_name := _}) -> Opts. -spec get_nodes(state()) -> {cets_discovery:get_nodes_result(), state()}. 
-get_nodes(State = #{}) -> +get_nodes(State = #{cluster_name := ClusterName}) -> prepare(), - insert(), - try mongoose_rdbms:execute_successfully(global, cets_disco_select, []) of + insert(ClusterName), + try mongoose_rdbms:execute_successfully(global, cets_disco_select, [ClusterName]) of {selected, Rows} -> Nodes = [binary_to_atom(X, latin1) || {X} <- Rows, X =/= <<>>], {{ok, Nodes}, State} @@ -31,19 +31,22 @@ get_nodes(State = #{}) -> end. prepare() -> - Filter = [<<"node_name">>], + Filter = [<<"node_name">>, <<"cluster_name">>], Fields = [<<"updated_timestamp">>], rdbms_queries:prepare_upsert(global, cets_disco_insert, discovery_nodes, Filter ++ Fields, Fields, Filter), - mongoose_rdbms:prepare(cets_disco_select, discovery_nodes, [node_name], - <<"SELECT node_name FROM discovery_nodes">>). + mongoose_rdbms:prepare(cets_disco_select, discovery_nodes, [cluster_name], + <<"SELECT node_name FROM discovery_nodes WHERE cluster_name = ?">>). -insert() -> +insert(ClusterName) -> Node = atom_to_binary(node(), latin1), Timestamp = os:system_time(microsecond), + Filter = [Node, ClusterName], + Fields = [Timestamp], try {updated, _} = rdbms_queries:execute_upsert(global, cets_disco_insert, - [Node, Timestamp], [Timestamp], [Node]) + Filter ++ Fields, Fields, + Filter) catch Class:Reason:Stacktrace -> ?LOG_ERROR(#{ what => discovery_failed_insert, From 86bc534d41dfaa0a00d331f19a4adbfef4687093 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 15 May 2023 16:06:42 +0200 Subject: [PATCH 020/161] Add discovery_nodes table for mysql/mssql --- priv/mssql2012.sql | 7 +++++++ priv/mysql.sql | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/priv/mssql2012.sql b/priv/mssql2012.sql index 351939358f7..d2a9fa0cc43 100644 --- a/priv/mssql2012.sql +++ b/priv/mssql2012.sql @@ -752,3 +752,10 @@ CREATE TABLE domain_events ( domain VARCHAR(250) NOT NULL ); CREATE INDEX i_domain_events_domain ON domain_events(domain); + +CREATE TABLE discovery_nodes ( + node_name varchar(250), 
+ cluster_name varchar(250), + updated_timestamp BIGINT NOT NULL, -- in microseconds + PRIMARY KEY (node_name) +); diff --git a/priv/mysql.sql b/priv/mysql.sql index f3f7ed00023..648c6c8021f 100644 --- a/priv/mysql.sql +++ b/priv/mysql.sql @@ -544,3 +544,10 @@ CREATE TABLE domain_events ( domain VARCHAR(250) NOT NULL ); CREATE INDEX i_domain_events_domain ON domain_events(domain); + +CREATE TABLE discovery_nodes ( + node_name varchar(250), + cluster_name varchar(250), + updated_timestamp BIGINT NOT NULL, -- in microseconds + PRIMARY KEY (node_name) +); From fce3b67d4c80bb9719fef6c7355f10db60cbbdf2 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 15 May 2023 16:33:13 +0200 Subject: [PATCH 021/161] Enable PgSQL for pgsql_cets preset on CircleCI --- .circleci/template.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/template.yml b/.circleci/template.yml index 1d07693d9da..510396408a6 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -827,7 +827,7 @@ workflows: filters: *all_tags - big_tests_in_docker: name: pgsql_cets_25 - executor: otp_25_redis + executor: otp_25_pgsql_redis context: mongooseim-org preset: pgsql_cets db: "mnesia postgres cets" From 9799dbd5851853dd82e55934c64982ee73637b56 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 15 May 2023 16:36:30 +0200 Subject: [PATCH 022/161] add cluster_name into opts spec --- src/mongoose_cets_discovery_rdbms.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mongoose_cets_discovery_rdbms.erl b/src/mongoose_cets_discovery_rdbms.erl index e18e3c8bd9e..5fae4eaeec4 100644 --- a/src/mongoose_cets_discovery_rdbms.erl +++ b/src/mongoose_cets_discovery_rdbms.erl @@ -5,7 +5,7 @@ -include_lib("kernel/include/logger.hrl"). --type opts() :: #{}. +-type opts() :: #{cluster_name => binary()}. -type state() :: opts(). -spec init(opts()) -> state(). 
From 5082295f44c8c81d9ddd2a5242f070736a7ed67f Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 15 May 2023 18:04:13 +0200 Subject: [PATCH 023/161] Fix primary key in schema --- priv/mssql2012.sql | 2 +- priv/mysql.sql | 2 +- priv/pg.sql | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/priv/mssql2012.sql b/priv/mssql2012.sql index d2a9fa0cc43..605aa69bb85 100644 --- a/priv/mssql2012.sql +++ b/priv/mssql2012.sql @@ -757,5 +757,5 @@ CREATE TABLE discovery_nodes ( node_name varchar(250), cluster_name varchar(250), updated_timestamp BIGINT NOT NULL, -- in microseconds - PRIMARY KEY (node_name) + PRIMARY KEY (cluster_name, node_name) ); diff --git a/priv/mysql.sql b/priv/mysql.sql index 648c6c8021f..4b619a3b8b7 100644 --- a/priv/mysql.sql +++ b/priv/mysql.sql @@ -549,5 +549,5 @@ CREATE TABLE discovery_nodes ( node_name varchar(250), cluster_name varchar(250), updated_timestamp BIGINT NOT NULL, -- in microseconds - PRIMARY KEY (node_name) + PRIMARY KEY (cluster_name, node_name) ); diff --git a/priv/pg.sql b/priv/pg.sql index 1541cbec529..7aa5924ec04 100644 --- a/priv/pg.sql +++ b/priv/pg.sql @@ -509,5 +509,5 @@ CREATE TABLE discovery_nodes ( node_name varchar(250), cluster_name varchar(250), updated_timestamp BIGINT NOT NULL, -- in microseconds - PRIMARY KEY (node_name) + PRIMARY KEY (cluster_name, node_name) ); From 309c09cd7db2ad6290b824d684f2065a80f24cea Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 15 May 2023 18:23:13 +0200 Subject: [PATCH 024/161] Update config_parser_SUITE with new internal_databases option --- test/common/config_parser_helper.erl | 30 ++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/test/common/config_parser_helper.erl b/test/common/config_parser_helper.erl index b0de4bd1d76..bd503eb1c48 100644 --- a/test/common/config_parser_helper.erl +++ b/test/common/config_parser_helper.erl @@ -14,6 +14,11 @@ options("host_types") -> [<<"this is host type">>, <<"some host type">>, <<"another 
host type">>, <<"yet another host type">>]}, {hosts, [<<"localhost">>]}, + {internal_databases, + #{cets => + #{backend => rdbms, cluster_name => mongooseim, + nodelist_file => "cets_disco.txt"}, + mnesia => #{}}}, {language, <<"en">>}, {listen, []}, {loglevel, warning}, @@ -62,6 +67,11 @@ options("miscellaneous") -> {hide_service_name, true}, {host_types, []}, {hosts, [<<"localhost">>, <<"anonymous.localhost">>]}, + {internal_databases, + #{cets => + #{backend => rdbms, cluster_name => mongooseim, + nodelist_file => "cets_disco.txt"}, + mnesia => #{}}}, {language, <<"en">>}, {listen, [config([listen, http], @@ -106,6 +116,11 @@ options("modules") -> {hide_service_name, false}, {host_types, []}, {hosts, [<<"localhost">>, <<"dummy_host">>]}, + {internal_databases, + #{cets => + #{backend => rdbms, cluster_name => mongooseim, + nodelist_file => "cets_disco.txt"}, + mnesia => #{}}}, {language, <<"en">>}, {listen, []}, {loglevel, warning}, @@ -131,6 +146,11 @@ options("mongooseim-pgsql") -> {host_types, []}, {hosts, [<<"localhost">>, <<"anonymous.localhost">>, <<"localhost.bis">>]}, + {internal_databases, + #{cets => + #{backend => rdbms, cluster_name => mongooseim, + nodelist_file => "cets_disco.txt"}, + mnesia => #{}}}, {language, <<"en">>}, {listen, [config([listen, c2s], @@ -299,6 +319,11 @@ options("outgoing_pools") -> {host_types, []}, {hosts, [<<"localhost">>, <<"anonymous.localhost">>, <<"localhost.bis">>]}, + {internal_databases, + #{cets => + #{backend => rdbms, cluster_name => mongooseim, + nodelist_file => "cets_disco.txt"}, + mnesia => #{}}}, {language, <<"en">>}, {listen, []}, {loglevel, warning}, @@ -370,6 +395,11 @@ options("s2s_only") -> {hide_service_name, false}, {host_types, []}, {hosts, [<<"localhost">>, <<"dummy_host">>]}, + {internal_databases, + #{cets => + #{backend => rdbms, cluster_name => mongooseim, + nodelist_file => "cets_disco.txt"}, + mnesia => #{}}}, {language, <<"en">>}, {listen, []}, {loglevel, warning}, From 
6a813cc09c1e1e23febc18faaca99fc3999dff45 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 15 May 2023 20:14:13 +0200 Subject: [PATCH 025/161] Update load_from_file test --- test/mongoose_config_SUITE.erl | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/mongoose_config_SUITE.erl b/test/mongoose_config_SUITE.erl index 87d48955685..75e2f778ecb 100644 --- a/test/mongoose_config_SUITE.erl +++ b/test/mongoose_config_SUITE.erl @@ -177,6 +177,11 @@ minimal_config_opts() -> {hide_service_name, false}, {host_types, []}, {hosts, [<<"localhost">>]}, + {internal_databases, + #{cets => + #{backend => rdbms, cluster_name => mongooseim, + nodelist_file => "cets_disco.txt"}, + mnesia => #{}}}, {language, <<"en">>}, {listen, []}, {loglevel, warning}, From 7e4dfc2855af16efe577ae6e7e2b7d933fbf4a71 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 16 May 2023 10:15:34 +0200 Subject: [PATCH 026/161] Add tests for cets disco (unit big tests) --- big_tests/default.spec | 1 + big_tests/dynamic_domains.spec | 1 + big_tests/tests/cets_disco_SUITE.erl | 72 +++++++++++++++++++ .../tests/cets_disco_SUITE_data/nodes.txt | 2 + src/ejabberd_sup.erl | 1 + src/mongoose_cets_discovery_rdbms.erl | 11 ++- 6 files changed, 82 insertions(+), 6 deletions(-) create mode 100644 big_tests/tests/cets_disco_SUITE.erl create mode 100644 big_tests/tests/cets_disco_SUITE_data/nodes.txt diff --git a/big_tests/default.spec b/big_tests/default.spec index 09686a6f466..7cd97facc0e 100644 --- a/big_tests/default.spec +++ b/big_tests/default.spec @@ -116,6 +116,7 @@ {suites, "tests", dynamic_domains_SUITE}. {suites, "tests", local_iq_SUITE}. {suites, "tests", tcp_listener_SUITE}. +{suites, "tests", cets_disco_SUITE}. {config, ["test.config"]}. {logdir, "ct_report"}. 
diff --git a/big_tests/dynamic_domains.spec b/big_tests/dynamic_domains.spec index 3710a19c6e5..8ec9e987850 100644 --- a/big_tests/dynamic_domains.spec +++ b/big_tests/dynamic_domains.spec @@ -158,6 +158,7 @@ {suites, "tests", domain_removal_SUITE}. {suites, "tests", local_iq_SUITE}. {suites, "tests", tcp_listener_SUITE}. +{suites, "tests", cets_disco_SUITE}. {config, ["dynamic_domains.config", "test.config"]}. diff --git a/big_tests/tests/cets_disco_SUITE.erl b/big_tests/tests/cets_disco_SUITE.erl new file mode 100644 index 00000000000..b8efd04986e --- /dev/null +++ b/big_tests/tests/cets_disco_SUITE.erl @@ -0,0 +1,72 @@ +-module(cets_disco_SUITE). +-compile([export_all, nowarn_export_all]). + +-import(distributed_helper, [mim/0, rpc/4]). +-include_lib("common_test/include/ct.hrl"). + +%%-------------------------------------------------------------------- +%% Suite configuration +%%-------------------------------------------------------------------- + +all() -> + [{group, file}, {group, rdbms}]. + +groups() -> + [{file, [], file_cases()}, + {rdbms, [], rdbms_cases()}]. + +file_cases() -> + [file_backend]. + +rdbms_cases() -> + [rdbms_backend]. + +suite() -> + escalus:suite(). + +%%-------------------------------------------------------------------- +%% Init & teardown +%%-------------------------------------------------------------------- +init_per_suite(Config) -> + escalus:init_per_suite(Config). + +end_per_suite(Config) -> + escalus:end_per_suite(Config). + +init_per_group(rdbms, Config) -> + case not ct_helper:is_ct_running() + orelse mongoose_helper:is_rdbms_enabled(domain_helper:host_type()) of + false -> {skip, rdbms_or_ct_not_running}; + true -> Config + end; +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. + +init_per_testcase(CaseName, Config) -> + escalus:init_per_testcase(CaseName, Config). + +end_per_testcase(CaseName, Config) -> + escalus:end_per_testcase(CaseName, Config). 
+ +%%-------------------------------------------------------------------- +%% Test cases +%%-------------------------------------------------------------------- + +file_backend(Config) -> + Path = filename:join(?config(mim_data_dir, Config), "nodes.txt"), + Opts = #{disco_file => Path}, + State = rpc(mim(), cets_discovery_file, init, [Opts]), + {{ok, Nodes}, _} = rpc(mim(), cets_discovery_file, get_nodes, [State]), + ['node1@localhost', 'node2@otherhost'] = lists:sort(Nodes). + +rdbms_backend(_Config) -> + Opts1 = #{cluster_name => <<"big_test">>, node_name_to_insert => <<"test1">>}, + Opts2 = #{cluster_name => <<"big_test">>, node_name_to_insert => <<"test2">>}, + State1 = rpc(mim(), mongoose_cets_discovery_rdbms, init, [Opts1]), + rpc(mim(), mongoose_cets_discovery_rdbms, get_nodes, [State1]), + State2 = rpc(mim(), mongoose_cets_discovery_rdbms, init, [Opts2]), + {{ok, Nodes}, _} = rpc(mim(), mongoose_cets_discovery_rdbms, get_nodes, [State2]), + [test1, test2] = lists:sort(Nodes). diff --git a/big_tests/tests/cets_disco_SUITE_data/nodes.txt b/big_tests/tests/cets_disco_SUITE_data/nodes.txt new file mode 100644 index 00000000000..8e85e526bd8 --- /dev/null +++ b/big_tests/tests/cets_disco_SUITE_data/nodes.txt @@ -0,0 +1,2 @@ +node1@localhost +node2@otherhost diff --git a/src/ejabberd_sup.erl b/src/ejabberd_sup.erl index 5e9efbfe0d7..71ae74682cb 100644 --- a/src/ejabberd_sup.erl +++ b/src/ejabberd_sup.erl @@ -161,6 +161,7 @@ init([]) -> DiscoOpts = #{ backend_module => disco_backend_to_module(DiscoBackend), cluster_name => atom_to_binary(ClusterName), + node_name_to_insert => atom_to_binary(node(), latin1), name => mongoose_cets_discovery, disco_file => DiscoFile}, CetsDisco = {cets_discovery, diff --git a/src/mongoose_cets_discovery_rdbms.erl b/src/mongoose_cets_discovery_rdbms.erl index 5fae4eaeec4..2462f02ed0d 100644 --- a/src/mongoose_cets_discovery_rdbms.erl +++ b/src/mongoose_cets_discovery_rdbms.erl @@ -5,17 +5,17 @@ 
-include_lib("kernel/include/logger.hrl"). --type opts() :: #{cluster_name => binary()}. +-type opts() :: #{cluster_name => binary(), node_name_to_insert => binary()}. -type state() :: opts(). -spec init(opts()) -> state(). -init(Opts = #{cluster_name := _}) -> +init(Opts = #{cluster_name := _, node_name_to_insert := _}) -> Opts. -spec get_nodes(state()) -> {cets_discovery:get_nodes_result(), state()}. -get_nodes(State = #{cluster_name := ClusterName}) -> +get_nodes(State = #{cluster_name := ClusterName, node_name_to_insert := Node}) -> prepare(), - insert(ClusterName), + insert(ClusterName, Node), try mongoose_rdbms:execute_successfully(global, cets_disco_select, [ClusterName]) of {selected, Rows} -> Nodes = [binary_to_atom(X, latin1) || {X} <- Rows, X =/= <<>>], @@ -38,8 +38,7 @@ prepare() -> mongoose_rdbms:prepare(cets_disco_select, discovery_nodes, [cluster_name], <<"SELECT node_name FROM discovery_nodes WHERE cluster_name = ?">>). -insert(ClusterName) -> - Node = atom_to_binary(node(), latin1), +insert(ClusterName, Node) -> Timestamp = os:system_time(microsecond), Filter = [Node, ClusterName], Fields = [Timestamp], From 7c0e0ecb43b327320df75773454f5b37517945da Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 22 May 2023 10:05:29 +0200 Subject: [PATCH 027/161] Make internal_databases block explicit --- rel/vars-toml.config | 2 + src/config/mongoose_config_spec.erl | 9 ++--- src/ejabberd_sup.erl | 38 +++++++++++-------- test/common/config_parser_helper.erl | 24 ++---------- test/config_parser_SUITE.erl | 24 ++++++++++++ .../miscellaneous.toml | 4 ++ test/config_parser_SUITE_data/modules.toml | 3 ++ .../mongooseim-pgsql.toml | 4 ++ test/mongoose_config_SUITE.erl | 6 +-- 9 files changed, 68 insertions(+), 46 deletions(-) diff --git a/rel/vars-toml.config b/rel/vars-toml.config index d17f75d1fcb..4d5e1b235b1 100644 --- a/rel/vars-toml.config +++ b/rel/vars-toml.config @@ -17,6 +17,8 @@ {http_api_client_endpoint, "port = {{ http_api_client_endpoint_port 
}}"}. {s2s_use_starttls, "\"optional\""}. {s2s_certfile, "\"priv/ssl/fake_server.pem\""}. +{internal_databases, "[internal_databases] + [internal_databases.mnesia]"}. "./configure.vars.config". diff --git a/src/config/mongoose_config_spec.erl b/src/config/mongoose_config_spec.erl index e9f2b414662..b5893caf872 100644 --- a/src/config/mongoose_config_spec.erl +++ b/src/config/mongoose_config_spec.erl @@ -433,7 +433,8 @@ internal_databases() -> #section{items = Items, format_items = map, wrap = global_config, - include = always}. + include = always, + defaults = #{}}. %% path: internal_databases.*.* internal_database_cets() -> @@ -446,16 +447,14 @@ internal_database_cets() -> validate = filename} }, defaults = #{<<"backend">> => rdbms, <<"cluster_name">> => mongooseim, - <<"nodelist_file">> => "cets_disco.txt"}, - include = always + <<"nodelist_file">> => "cets_disco.txt"} }. %% path: internal_databases.*.* internal_database_mnesia() -> #section{ items = #{}, - defaults = #{}, - include = always + defaults = #{} }. 
%% path: outgoing_pools diff --git a/src/ejabberd_sup.erl b/src/ejabberd_sup.erl index 71ae74682cb..a097b5b64c3 100644 --- a/src/ejabberd_sup.erl +++ b/src/ejabberd_sup.erl @@ -153,23 +153,9 @@ init([]) -> {pg, {pg, start_link, [mim_scope]}, permanent, infinity, supervisor, [pg]}, - ConfigDir = filename:dirname(mongoose_config:get_config_path()), - #{backend := DiscoBackend, cluster_name := ClusterName, - nodelist_file := NodeFile} = - mongoose_config:get_opt([internal_databases, cets]), - DiscoFile = filename:join(ConfigDir, NodeFile), - DiscoOpts = #{ - backend_module => disco_backend_to_module(DiscoBackend), - cluster_name => atom_to_binary(ClusterName), - node_name_to_insert => atom_to_binary(node(), latin1), - name => mongoose_cets_discovery, disco_file => DiscoFile}, - CetsDisco = - {cets_discovery, - {cets_discovery, start_link, [DiscoOpts]}, - permanent, infinity, supervisor, [cets_discovery]}, {ok, {{one_for_one, 10, 1}, - [CetsDisco, - PG, + cets_specs() ++ + [PG, Hooks, Cleaner, SMBackendSupervisor, @@ -203,5 +189,25 @@ stop_child(Proc) -> supervisor:delete_child(ejabberd_sup, Proc), ok. +cets_specs() -> + cets_specs(mongoose_config:get_opt([internal_databases, cets], disabled)). + +cets_specs(disabled) -> + []; +cets_specs(#{backend := DiscoBackend, cluster_name := ClusterName, + nodelist_file := NodeFile}) -> + ConfigDir = filename:dirname(mongoose_config:get_config_path()), + DiscoFile = filename:join(ConfigDir, NodeFile), + DiscoOpts = #{ + backend_module => disco_backend_to_module(DiscoBackend), + cluster_name => atom_to_binary(ClusterName), + node_name_to_insert => atom_to_binary(node(), latin1), + name => mongoose_cets_discovery, disco_file => DiscoFile}, + CetsDisco = + {cets_discovery, + {cets_discovery, start_link, [DiscoOpts]}, + permanent, infinity, supervisor, [cets_discovery]}, + [CetsDisco]. + disco_backend_to_module(rdbms) -> mongoose_cets_discovery_rdbms; disco_backend_to_module(file) -> cets_discovery_file. 
diff --git a/test/common/config_parser_helper.erl b/test/common/config_parser_helper.erl index bd503eb1c48..04d5cee0a3e 100644 --- a/test/common/config_parser_helper.erl +++ b/test/common/config_parser_helper.erl @@ -14,11 +14,7 @@ options("host_types") -> [<<"this is host type">>, <<"some host type">>, <<"another host type">>, <<"yet another host type">>]}, {hosts, [<<"localhost">>]}, - {internal_databases, - #{cets => - #{backend => rdbms, cluster_name => mongooseim, - nodelist_file => "cets_disco.txt"}, - mnesia => #{}}}, + {internal_databases, #{}}, {language, <<"en">>}, {listen, []}, {loglevel, warning}, @@ -116,11 +112,7 @@ options("modules") -> {hide_service_name, false}, {host_types, []}, {hosts, [<<"localhost">>, <<"dummy_host">>]}, - {internal_databases, - #{cets => - #{backend => rdbms, cluster_name => mongooseim, - nodelist_file => "cets_disco.txt"}, - mnesia => #{}}}, + {internal_databases, #{mnesia => #{}}}, {language, <<"en">>}, {listen, []}, {loglevel, warning}, @@ -319,11 +311,7 @@ options("outgoing_pools") -> {host_types, []}, {hosts, [<<"localhost">>, <<"anonymous.localhost">>, <<"localhost.bis">>]}, - {internal_databases, - #{cets => - #{backend => rdbms, cluster_name => mongooseim, - nodelist_file => "cets_disco.txt"}, - mnesia => #{}}}, + {internal_databases, #{}}, {language, <<"en">>}, {listen, []}, {loglevel, warning}, @@ -395,11 +383,7 @@ options("s2s_only") -> {hide_service_name, false}, {host_types, []}, {hosts, [<<"localhost">>, <<"dummy_host">>]}, - {internal_databases, - #{cets => - #{backend => rdbms, cluster_name => mongooseim, - nodelist_file => "cets_disco.txt"}, - mnesia => #{}}}, + {internal_databases, #{}}, {language, <<"en">>}, {listen, []}, {loglevel, warning}, diff --git a/test/config_parser_SUITE.erl b/test/config_parser_SUITE.erl index be5ce8ed65b..32442bbf5ec 100644 --- a/test/config_parser_SUITE.erl +++ b/test/config_parser_SUITE.erl @@ -52,6 +52,7 @@ all() -> {group, listen}, {group, auth}, {group, pool}, + {group, 
internal_databases}, {group, shaper_acl_access}, {group, s2s}, {group, modules}, @@ -147,6 +148,7 @@ groups() -> pool_rabbit_connection, pool_ldap, pool_ldap_connection]}, + {internal_databases, [parallel], [internal_database_cets]}, {shaper_acl_access, [parallel], [shaper, acl, acl_merge_host_and_global, @@ -1200,6 +1202,28 @@ test_fast_tls_server(P, T) -> ?err(T(#{<<"versions">> => [<<"tlsv1.2">>]})), % option only for just_tls ?err(T(#{<<"protocol_options">> => [<<>>]})). +%% tests: internal_databases + +internal_database_cets(_Config) -> + CetsEnabled = #{<<"internal_databases">> => #{<<"cets">> => #{}}}, + %% No internal_databases section means an empty list of databases + ?cfg([internal_databases], #{}, #{}), % default + %% Empty internal_databases could be configured explicitly + ?cfg([internal_databases], #{}, #{<<"internal_databases">> => #{}}), + + ?cfg([internal_databases, cets, backend], file, + #{<<"internal_databases">> => #{<<"cets">> => #{<<"backend">> => <<"file">>}}}), + ?cfg([internal_databases, cets, backend], rdbms, + #{<<"internal_databases">> => #{<<"cets">> => #{<<"cluster_name">> => <<"test">>}}}), + + ?cfg([internal_databases, cets, cluster_name], mongooseim, CetsEnabled), + ?cfg([internal_databases, cets, nodelist_file], "cets_disco.txt", CetsEnabled), + %% If only mnesia section is defined, CETS section is not included + ?cfg([internal_databases], #{mnesia => #{}}, + #{<<"internal_databases">> => #{<<"mnesia">> => #{}}}), + ?err(#{<<"internal_databases">> => #{<<"cets">> => #{<<"backend">> => <<"mnesia">>}}}), + ?err(#{<<"internal_databases">> => #{<<"cets">> => #{<<"cluster_name">> => 123}}}). 
+ %% tests: shaper, acl, access shaper(_Config) -> ?cfg([shaper, normal], #{max_rate => 1000}, diff --git a/test/config_parser_SUITE_data/miscellaneous.toml b/test/config_parser_SUITE_data/miscellaneous.toml index b94d35fa089..15ea205eac6 100644 --- a/test/config_parser_SUITE_data/miscellaneous.toml +++ b/test/config_parser_SUITE_data/miscellaneous.toml @@ -80,3 +80,7 @@ periodic_report = 300_000 tracking_id.id = "G-12345678" tracking_id.secret = "Secret" + +[internal_databases] + [internal_databases.mnesia] + [internal_databases.cets] diff --git a/test/config_parser_SUITE_data/modules.toml b/test/config_parser_SUITE_data/modules.toml index f11a1a4d6a5..35713d01edd 100644 --- a/test/config_parser_SUITE_data/modules.toml +++ b/test/config_parser_SUITE_data/modules.toml @@ -5,6 +5,9 @@ ] default_server_domain = "localhost" +[internal_databases] + [internal_databases.mnesia] + [modules.mod_adhoc] iqdisc.type = "one_queue" report_commands_node = true diff --git a/test/config_parser_SUITE_data/mongooseim-pgsql.toml b/test/config_parser_SUITE_data/mongooseim-pgsql.toml index ff43dc17e02..327087cc16d 100644 --- a/test/config_parser_SUITE_data/mongooseim-pgsql.toml +++ b/test/config_parser_SUITE_data/mongooseim-pgsql.toml @@ -12,6 +12,10 @@ sm_backend = "mnesia" max_fsm_queue = 1000 +[internal_databases] + [internal_databases.mnesia] + [internal_databases.cets] + [[listen.http]] port = 5280 transport.num_acceptors = 10 diff --git a/test/mongoose_config_SUITE.erl b/test/mongoose_config_SUITE.erl index 75e2f778ecb..d8ea28afda8 100644 --- a/test/mongoose_config_SUITE.erl +++ b/test/mongoose_config_SUITE.erl @@ -177,11 +177,7 @@ minimal_config_opts() -> {hide_service_name, false}, {host_types, []}, {hosts, [<<"localhost">>]}, - {internal_databases, - #{cets => - #{backend => rdbms, cluster_name => mongooseim, - nodelist_file => "cets_disco.txt"}, - mnesia => #{}}}, + {internal_databases, #{}}, {language, <<"en">>}, {listen, []}, {loglevel, warning}, From 
058c188d3ead14de7cebfc9340556cb581f75063 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 22 May 2023 22:09:33 +0200 Subject: [PATCH 028/161] Docs for internal databases block --- doc/configuration/configuration-files.md | 1 + doc/configuration/internal-databases.md | 58 ++++++++++++++++++++++++ mkdocs.yml | 1 + 3 files changed, 60 insertions(+) create mode 100644 doc/configuration/internal-databases.md diff --git a/doc/configuration/configuration-files.md b/doc/configuration/configuration-files.md index 35712ed2021..75b4e6ed366 100644 --- a/doc/configuration/configuration-files.md +++ b/doc/configuration/configuration-files.md @@ -15,6 +15,7 @@ The file is divided into the following sections: * [**general**](general.md) - Served XMPP domains, log level, server language and some other miscellaneous settings. * [**listen**](listen.md) - Configured listeners, receiving incoming XMPP and HTTP connections. * [**auth**](auth.md) - Supported client authentication methods and their options. +* [**internal_databases**](internal-databases.md) - Options for Mnesia and CETS. They are primarily used for clustering. * [**outgoing_pools**](outgoing-connections.md) - Outgoing connections to external services, including databases, message queues and HTTP services. * [**services**](Services.md) - Internal services like an administration API and system metrics. * [**modules**](Modules.md) - [XMPP extension](https://xmpp.org/extensions/) modules, which extend the basic functionality provided by XMPP. diff --git a/doc/configuration/internal-databases.md b/doc/configuration/internal-databases.md new file mode 100644 index 00000000000..1db21a0f396 --- /dev/null +++ b/doc/configuration/internal-databases.md @@ -0,0 +1,58 @@ +Internal databases for MongooseIM. + +Mnesia is a legacy way to cluster MongooseIM nodes. It is also could be used to store data, but we recommend +to use RDBMS databases instead because of scalability and stability reasons. 
+ +CETS is a new way to cluster MongooseIM nodes. It is used to replicate session list data between MongooseIM nodes. +CETS needs to know a list of nodes for the node discovery. There are two ways to get a list of nodes: + +- A text file with a list of nodes on each line. It is useful when there is an external script to make this file based on + some custom logic (for example, a bash script that uses AWS CLI to discover instances in the autoscaling group). This file + would be automatilly reread on change. +- RDBMS database. MongooseIM would write into RDBMS its nodename and read a list of other nodes. It is pretty simple, but + RDBMS database could be a single point of failure. + +Section example: + +```toml +[internal_databases] + [internal_databases.mnesia] + + [internal_databases.cets] + backend = "rdbms" + cluster_name = "mongooseim" + nodelist_file = "cets_disco.txt" +``` + +To enable just CETS, define only `internal_databases.cets` section: + +```toml +[internal_databases] + [internal_databases.cets] +``` + +# CETS Options + +### `internal_databases.cets.backend` + +Backend for CETS discovery. + +* **Syntax:** string, one of `"rdbms"`, `"file"`. +* **Default:** `"rdbms"` +* **Example:** `backend = "rdbms"` + +### `internal_databases.cets.cluster_name` + +Namespace for the cluster. Only nodes with the same cluster name would be discoverd. This option is for RDBMS backend. + +* **Syntax:** string. +* **Default:** `"mongooseim"` +* **Example:** `cluster_name = "mongooseim"` + +### `internal_databases.cets.nodelist_file` + +File to read a list of nodes from. Relative to the MongooseIM's config directory. This option is for the file backend. + +* **Syntax:** path. 
+* **Default:** `"cets_disco.txt"` +* **Example:** `nodelist_file = "/etc/mim_nodes.txt"` diff --git a/mkdocs.yml b/mkdocs.yml index b61f6a8f7a5..1f947362739 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -69,6 +69,7 @@ nav: - 'Options: General': 'configuration/general.md' - 'Options: Listen': 'configuration/listen.md' - 'Options: Auth': 'configuration/auth.md' + - 'Options: Internal Databases': 'configuration/internal-databases.md' - 'Options: Outgoing connections': 'configuration/outgoing-connections.md' - 'Options: Services': 'configuration/Services.md' - 'Options: Extension Modules': 'configuration/Modules.md' From 40bcaca20d0a1695545a7f66c022e4c6de899775 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 25 May 2023 21:48:41 +0200 Subject: [PATCH 029/161] Remove default value for nodelist_file --- doc/configuration/internal-databases.md | 5 +++-- src/config/mongoose_config_spec.erl | 5 ++--- src/ejabberd_sup.erl | 16 ++++++++++++---- test/common/config_parser_helper.erl | 6 ++---- test/config_parser_SUITE.erl | 4 +++- 5 files changed, 22 insertions(+), 14 deletions(-) diff --git a/doc/configuration/internal-databases.md b/doc/configuration/internal-databases.md index 1db21a0f396..6d409ca9bb5 100644 --- a/doc/configuration/internal-databases.md +++ b/doc/configuration/internal-databases.md @@ -51,8 +51,9 @@ Namespace for the cluster. Only nodes with the same cluster name would be discov ### `internal_databases.cets.nodelist_file` -File to read a list of nodes from. Relative to the MongooseIM's config directory. This option is for the file backend. +File to read a list of nodes from. Relative to the MongooseIM's release directory. This option is for the file backend. +Required, if `backend = "file"`. * **Syntax:** path. -* **Default:** `"cets_disco.txt"` +* **Default:** not specified. 
* **Example:** `nodelist_file = "/etc/mim_nodes.txt"` diff --git a/src/config/mongoose_config_spec.erl b/src/config/mongoose_config_spec.erl index b5893caf872..23a5177b5e6 100644 --- a/src/config/mongoose_config_spec.erl +++ b/src/config/mongoose_config_spec.erl @@ -442,12 +442,11 @@ internal_database_cets() -> items = #{<<"backend">> => #option{type = atom, validate = {enum, [file, rdbms]}}, <<"cluster_name">> => #option{type = atom, validate = non_empty}, - %% Relative to the config directory (or an absolute name) + %% Relative to the release directory (or an absolute name) <<"nodelist_file">> => #option{type = string, validate = filename} }, - defaults = #{<<"backend">> => rdbms, <<"cluster_name">> => mongooseim, - <<"nodelist_file">> => "cets_disco.txt"} + defaults = #{<<"backend">> => rdbms, <<"cluster_name">> => mongooseim} }. %% path: internal_databases.*.* diff --git a/src/ejabberd_sup.erl b/src/ejabberd_sup.erl index a097b5b64c3..3a358833da6 100644 --- a/src/ejabberd_sup.erl +++ b/src/ejabberd_sup.erl @@ -194,10 +194,18 @@ cets_specs() -> cets_specs(disabled) -> []; -cets_specs(#{backend := DiscoBackend, cluster_name := ClusterName, - nodelist_file := NodeFile}) -> - ConfigDir = filename:dirname(mongoose_config:get_config_path()), - DiscoFile = filename:join(ConfigDir, NodeFile), +cets_specs(#{backend := DiscoBackend, cluster_name := ClusterName} = Opts) -> + DiscoFile = + case {DiscoBackend, Opts} of + {file, #{nodelist_file := NodeFile}} -> + NodeFile; + {file, _} -> + ?LOG_CRITICAL(#{what => nodelist_file_option_is_required, + text => <<"Specify internal_databases.cets.nodelist_file option">>}), + error(nodelist_file_option_is_required); + _ -> + undefined + end, DiscoOpts = #{ backend_module => disco_backend_to_module(DiscoBackend), cluster_name => atom_to_binary(ClusterName), diff --git a/test/common/config_parser_helper.erl b/test/common/config_parser_helper.erl index 04d5cee0a3e..f535dfdaf23 100644 --- a/test/common/config_parser_helper.erl +++ 
b/test/common/config_parser_helper.erl @@ -65,8 +65,7 @@ options("miscellaneous") -> {hosts, [<<"localhost">>, <<"anonymous.localhost">>]}, {internal_databases, #{cets => - #{backend => rdbms, cluster_name => mongooseim, - nodelist_file => "cets_disco.txt"}, + #{backend => rdbms, cluster_name => mongooseim}, mnesia => #{}}}, {language, <<"en">>}, {listen, @@ -140,8 +139,7 @@ options("mongooseim-pgsql") -> [<<"localhost">>, <<"anonymous.localhost">>, <<"localhost.bis">>]}, {internal_databases, #{cets => - #{backend => rdbms, cluster_name => mongooseim, - nodelist_file => "cets_disco.txt"}, + #{backend => rdbms, cluster_name => mongooseim}, mnesia => #{}}}, {language, <<"en">>}, {listen, diff --git a/test/config_parser_SUITE.erl b/test/config_parser_SUITE.erl index 32442bbf5ec..5de04465947 100644 --- a/test/config_parser_SUITE.erl +++ b/test/config_parser_SUITE.erl @@ -1206,6 +1206,8 @@ test_fast_tls_server(P, T) -> internal_database_cets(_Config) -> CetsEnabled = #{<<"internal_databases">> => #{<<"cets">> => #{}}}, + CetsFile = #{<<"internal_databases">> => #{<<"cets">> => + #{<<"backend">> => <<"file">>, <<"nodelist_file">> => <<"/dev/null">>}}}, %% No internal_databases section means an empty list of databases ?cfg([internal_databases], #{}, #{}), % default %% Empty internal_databases could be configured explicitly @@ -1217,7 +1219,7 @@ internal_database_cets(_Config) -> #{<<"internal_databases">> => #{<<"cets">> => #{<<"cluster_name">> => <<"test">>}}}), ?cfg([internal_databases, cets, cluster_name], mongooseim, CetsEnabled), - ?cfg([internal_databases, cets, nodelist_file], "cets_disco.txt", CetsEnabled), + ?cfg([internal_databases, cets, nodelist_file], "/dev/null", CetsFile), %% If only mnesia section is defined, CETS section is not included ?cfg([internal_databases], #{mnesia => #{}}, #{<<"internal_databases">> => #{<<"mnesia">> => #{}}}), From 712a1645b4bfe54c8be25a3870e7b7cf7780a697 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 25 May 2023 
22:01:28 +0200 Subject: [PATCH 030/161] Fix review comments --- doc/configuration/internal-databases.md | 20 ++++++++++++++------ rel/mim1.vars-toml.config | 2 +- rel/mim2.vars-toml.config | 2 +- rel/mim3.vars-toml.config | 2 +- src/config/mongoose_config_spec.erl | 12 ++++-------- src/ejabberd_sup.erl | 8 ++++---- src/mongoose_cets_discovery_rdbms.erl | 2 +- test/config_parser_SUITE.erl | 4 ++-- 8 files changed, 28 insertions(+), 24 deletions(-) diff --git a/doc/configuration/internal-databases.md b/doc/configuration/internal-databases.md index 6d409ca9bb5..91435a9e627 100644 --- a/doc/configuration/internal-databases.md +++ b/doc/configuration/internal-databases.md @@ -1,9 +1,9 @@ -Internal databases for MongooseIM. +Internal databases are used to cluster MongooseIM nodes, and to replicate session list data between them. -Mnesia is a legacy way to cluster MongooseIM nodes. It is also could be used to store data, but we recommend +Mnesia is a legacy way to cluster MongooseIM nodes. It is also could be used to store persistent data, but we recommend to use RDBMS databases instead because of scalability and stability reasons. -CETS is a new way to cluster MongooseIM nodes. It is used to replicate session list data between MongooseIM nodes. +CETS is a new way to cluster MongooseIM nodes. CETS needs to know a list of nodes for the node discovery. There are two ways to get a list of nodes: - A text file with a list of nodes on each line. It is useful when there is an external script to make this file based on @@ -21,7 +21,15 @@ Section example: [internal_databases.cets] backend = "rdbms" cluster_name = "mongooseim" - nodelist_file = "cets_disco.txt" +``` + +or + +```toml +[internal_databases] + [internal_databases.cets] + backend = "file" + node_list_file = "cets_disco.txt" ``` To enable just CETS, define only `internal_databases.cets` section: @@ -49,11 +57,11 @@ Namespace for the cluster. 
Only nodes with the same cluster name would be discov * **Default:** `"mongooseim"` * **Example:** `cluster_name = "mongooseim"` -### `internal_databases.cets.nodelist_file` +### `internal_databases.cets.node_list_file` File to read a list of nodes from. Relative to the MongooseIM's release directory. This option is for the file backend. Required, if `backend = "file"`. * **Syntax:** path. * **Default:** not specified. -* **Example:** `nodelist_file = "/etc/mim_nodes.txt"` +* **Example:** `node_list_file = "/etc/mim_nodes.txt"` diff --git a/rel/mim1.vars-toml.config b/rel/mim1.vars-toml.config index 694052dcf66..0cf2e6bbd8c 100644 --- a/rel/mim1.vars-toml.config +++ b/rel/mim1.vars-toml.config @@ -20,7 +20,7 @@ {hosts, "\"localhost\", \"anonymous.localhost\", \"localhost.bis\""}. {host_types, "\"test type\", \"dummy auth\", \"anonymous\""}. {default_server_domain, "\"localhost\""}. -{cluster_name, "mim_main"}. +{cluster_name, "mim"}. {mod_amp, ""}. {host_config, diff --git a/rel/mim2.vars-toml.config b/rel/mim2.vars-toml.config index de866e27dd4..758de03b341 100644 --- a/rel/mim2.vars-toml.config +++ b/rel/mim2.vars-toml.config @@ -18,7 +18,7 @@ {hosts, "\"localhost\", \"anonymous.localhost\", \"localhost.bis\""}. {host_types, "\"test type\", \"dummy auth\""}. {default_server_domain, "\"localhost\""}. -{cluster_name, "mim_main"}. +{cluster_name, "mim"}. {s2s_addr, "[[s2s.address]] host = \"localhost2\" ip_address = \"127.0.0.1\""}. diff --git a/rel/mim3.vars-toml.config b/rel/mim3.vars-toml.config index ac6574072fb..6e758440aa6 100644 --- a/rel/mim3.vars-toml.config +++ b/rel/mim3.vars-toml.config @@ -20,7 +20,7 @@ {hosts, "\"localhost\", \"anonymous.localhost\", \"localhost.bis\""}. {default_server_domain, "\"localhost\""}. -{cluster_name, "mim_main"}. +{cluster_name, "mim"}. 
{s2s_addr, "[[s2s.address]] host = \"localhost2\" diff --git a/src/config/mongoose_config_spec.erl b/src/config/mongoose_config_spec.erl index 23a5177b5e6..34e5e615291 100644 --- a/src/config/mongoose_config_spec.erl +++ b/src/config/mongoose_config_spec.erl @@ -433,8 +433,7 @@ internal_databases() -> #section{items = Items, format_items = map, wrap = global_config, - include = always, - defaults = #{}}. + include = always}. %% path: internal_databases.*.* internal_database_cets() -> @@ -443,18 +442,15 @@ internal_database_cets() -> validate = {enum, [file, rdbms]}}, <<"cluster_name">> => #option{type = atom, validate = non_empty}, %% Relative to the release directory (or an absolute name) - <<"nodelist_file">> => #option{type = string, - validate = filename} + <<"node_list_file">> => #option{type = string, + validate = filename} }, defaults = #{<<"backend">> => rdbms, <<"cluster_name">> => mongooseim} }. %% path: internal_databases.*.* internal_database_mnesia() -> - #section{ - items = #{}, - defaults = #{} - }. + #section{}. 
%% path: outgoing_pools outgoing_pools() -> diff --git a/src/ejabberd_sup.erl b/src/ejabberd_sup.erl index 3a358833da6..f81fc5ea940 100644 --- a/src/ejabberd_sup.erl +++ b/src/ejabberd_sup.erl @@ -197,12 +197,12 @@ cets_specs(disabled) -> cets_specs(#{backend := DiscoBackend, cluster_name := ClusterName} = Opts) -> DiscoFile = case {DiscoBackend, Opts} of - {file, #{nodelist_file := NodeFile}} -> + {file, #{node_list_file := NodeFile}} -> NodeFile; {file, _} -> - ?LOG_CRITICAL(#{what => nodelist_file_option_is_required, - text => <<"Specify internal_databases.cets.nodelist_file option">>}), - error(nodelist_file_option_is_required); + ?LOG_CRITICAL(#{what => node_list_file_option_is_required, + text => <<"Specify internal_databases.cets.node_list_file option">>}), + error(node_list_file_option_is_required); _ -> undefined end, diff --git a/src/mongoose_cets_discovery_rdbms.erl b/src/mongoose_cets_discovery_rdbms.erl index 2462f02ed0d..106c4e2d91b 100644 --- a/src/mongoose_cets_discovery_rdbms.erl +++ b/src/mongoose_cets_discovery_rdbms.erl @@ -18,7 +18,7 @@ get_nodes(State = #{cluster_name := ClusterName, node_name_to_insert := Node}) - insert(ClusterName, Node), try mongoose_rdbms:execute_successfully(global, cets_disco_select, [ClusterName]) of {selected, Rows} -> - Nodes = [binary_to_atom(X, latin1) || {X} <- Rows, X =/= <<>>], + Nodes = [binary_to_atom(X) || {X} <- Rows, X =/= <<>>], {{ok, Nodes}, State} catch Class:Reason:Stacktrace -> ?LOG_ERROR(#{ diff --git a/test/config_parser_SUITE.erl b/test/config_parser_SUITE.erl index 5de04465947..aa8f0995b5b 100644 --- a/test/config_parser_SUITE.erl +++ b/test/config_parser_SUITE.erl @@ -1207,7 +1207,7 @@ test_fast_tls_server(P, T) -> internal_database_cets(_Config) -> CetsEnabled = #{<<"internal_databases">> => #{<<"cets">> => #{}}}, CetsFile = #{<<"internal_databases">> => #{<<"cets">> => - #{<<"backend">> => <<"file">>, <<"nodelist_file">> => <<"/dev/null">>}}}, + #{<<"backend">> => <<"file">>, 
<<"node_list_file">> => <<"/dev/null">>}}}, %% No internal_databases section means an empty list of databases ?cfg([internal_databases], #{}, #{}), % default %% Empty internal_databases could be configured explicitly @@ -1219,7 +1219,7 @@ internal_database_cets(_Config) -> #{<<"internal_databases">> => #{<<"cets">> => #{<<"cluster_name">> => <<"test">>}}}), ?cfg([internal_databases, cets, cluster_name], mongooseim, CetsEnabled), - ?cfg([internal_databases, cets, nodelist_file], "/dev/null", CetsFile), + ?cfg([internal_databases, cets, node_list_file], "/dev/null", CetsFile), %% If only mnesia section is defined, CETS section is not included ?cfg([internal_databases], #{mnesia => #{}}, #{<<"internal_databases">> => #{<<"mnesia">> => #{}}}), From bd40df497d7025ec24fb3e09486a0248cdb43cbb Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 21 Jun 2023 17:05:53 +0200 Subject: [PATCH 031/161] Add node_id for discovery_nodes table --- priv/mssql2012.sql | 2 ++ priv/mysql.sql | 2 ++ priv/pg.sql | 2 ++ 3 files changed, 6 insertions(+) diff --git a/priv/mssql2012.sql b/priv/mssql2012.sql index 605aa69bb85..6feb633d43b 100644 --- a/priv/mssql2012.sql +++ b/priv/mssql2012.sql @@ -757,5 +757,7 @@ CREATE TABLE discovery_nodes ( node_name varchar(250), cluster_name varchar(250), updated_timestamp BIGINT NOT NULL, -- in microseconds + node_id INT UNSIGNED, PRIMARY KEY (cluster_name, node_name) ); +CREATE UNIQUE INDEX i_discovery_nodes_nodeid ON discovery_nodes(cluster_name, node_id); diff --git a/priv/mysql.sql b/priv/mysql.sql index 4b619a3b8b7..70826dcc5d6 100644 --- a/priv/mysql.sql +++ b/priv/mysql.sql @@ -549,5 +549,7 @@ CREATE TABLE discovery_nodes ( node_name varchar(250), cluster_name varchar(250), updated_timestamp BIGINT NOT NULL, -- in microseconds + node_id INT UNSIGNED, PRIMARY KEY (cluster_name, node_name) ); +CREATE UNIQUE INDEX i_discovery_nodes_nodeid USING BTREE ON discovery_nodes(cluster_name, node_id); diff --git a/priv/pg.sql b/priv/pg.sql index 
7aa5924ec04..42ec43bf5e5 100644 --- a/priv/pg.sql +++ b/priv/pg.sql @@ -509,5 +509,7 @@ CREATE TABLE discovery_nodes ( node_name varchar(250), cluster_name varchar(250), updated_timestamp BIGINT NOT NULL, -- in microseconds + node_id INT UNSIGNED, PRIMARY KEY (cluster_name, node_name) ); +CREATE UNIQUE INDEX i_discovery_nodes_nodeid ON discovery_nodes USING BTREE(cluster_name, node_id); From 5bbe39a325eb9d2c8b86d508cace55a0500c9c20 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 21 Jun 2023 17:14:24 +0200 Subject: [PATCH 032/161] Start Mnesia based on config Report stacktrace if we fail to start the application --- src/ejabberd_app.erl | 36 ++++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/src/ejabberd_app.erl b/src/ejabberd_app.erl index a5d6b4cb17a..7f924286280 100644 --- a/src/ejabberd_app.erl +++ b/src/ejabberd_app.erl @@ -40,18 +40,30 @@ %%% start(normal, _Args) -> + try + do_start() + catch Class:Reason:StackTrace -> + %% Log a stacktrace because while proc_lib:crash_report/4 would report a crash reason, + %% it would not report the stacktrace + ?LOG_CRITICAL(#{what => app_failed_to_start, + class => Class, reason => Reason, stacktrace => StackTrace}), + erlang:raise(Class, Reason, StackTrace) + end; +start(_, _) -> + {error, badarg}. + +do_start() -> mongoose_fips:notify(), write_pid_file(), update_status_file(starting), - db_init(), application:start(cache_tab), mongoose_graphql:init(), translate:start(), - ejabberd_node_id:start(), ejabberd_commands:init(), mongoose_graphql_commands:start(), mongoose_config:start(), + db_init(), mongoose_router:start(), mongoose_logs:set_global_loglevel(mongoose_config:get_opt(loglevel)), mongoose_deprecations:start(), @@ -72,9 +84,7 @@ start(normal, _Args) -> ejabberd_admin:start(), update_status_file(started), ?LOG_NOTICE(#{what => mongooseim_node_started, version => ?MONGOOSE_VERSION, node => node()}), - Sup; -start(_, _) -> - {error, badarg}. + Sup. 
%% @doc Prepare the application for termination. %% This function is called when an application is about to be stopped, @@ -105,14 +115,24 @@ stop(_State) -> %%% Internal functions %%% db_init() -> + case mongoose_config:get_opt([internal_databases, mnesia], disabled) of + disabled -> + ok; + _ -> + db_init_mnesia(), + mongoose_short_number_node_id_mnesia:init() + end. + +db_init_mnesia() -> + %% Ensure Mnesia is stopped + mnesia:stop(), case mnesia:system_info(extra_db_nodes) of [] -> - application:stop(mnesia), - mnesia:create_schema([node()]), - application:start(mnesia, permanent); + mnesia:create_schema([node()]); _ -> ok end, + mnesia:start(), mnesia:wait_for_tables(mnesia:system_info(local_tables), infinity). -spec broadcast_c2s_shutdown_listeners() -> ok. From 33c8487f41b07dfb6c2ef0e3481783328e261832 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 21 Jun 2023 17:16:18 +0200 Subject: [PATCH 033/161] Use mongoose_start_node_id in ejabberd_local --- src/ejabberd_local.erl | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/ejabberd_local.erl b/src/ejabberd_local.erl index d7b7787c114..41dc2cbf732 100644 --- a/src/ejabberd_local.erl +++ b/src/ejabberd_local.erl @@ -423,23 +423,21 @@ do_unregister_host(Host) -> make_iq_id() -> %% Attach NodeId, so we know to which node to forward the response - {ok, NodeId} = ejabberd_node_id:node_id(), + BinNodeId = mongoose_start_node_id:node_id(), Rand = mongoose_bin:gen_from_crypto(), - <<(integer_to_binary(NodeId))/binary, "_", Rand/binary>>. + <>. %% Parses ID, made by make_iq_id function -spec parse_iq_id(ID :: binary()) -> local_node | {remote_node, node()} | {error, {unknown_node_id, term()} | bad_iq_format}. 
parse_iq_id(ID) -> - {ok, NodeId} = ejabberd_node_id:node_id(), - BinNodeId = integer_to_binary(NodeId), + BinNodeId = mongoose_start_node_id:node_id(), case binary:split(ID, <<"_">>) of [BinNodeId, _Rest] -> local_node; [OtherBinNodeId, _Rest] -> - OtherNodeId = binary_to_integer(OtherBinNodeId), - case ejabberd_node_id:node_id_to_name(OtherNodeId) of + case mongoose_start_node_id:node_id_to_name(OtherBinNodeId) of {ok, NodeName} -> {remote_node, NodeName}; {error, Reason} -> From 7ba1f5e5ab76cd58cc3f9b77ddba6608a0ce92a9 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 21 Jun 2023 17:19:45 +0200 Subject: [PATCH 034/161] Start CETS after outgoing pools Start mongoose_start_node_id --- src/ejabberd_node_id.erl | 70 ---------------------------------------- src/ejabberd_sup.erl | 11 +++++-- 2 files changed, 8 insertions(+), 73 deletions(-) delete mode 100644 src/ejabberd_node_id.erl diff --git a/src/ejabberd_node_id.erl b/src/ejabberd_node_id.erl deleted file mode 100644 index 0f0d84c80e6..00000000000 --- a/src/ejabberd_node_id.erl +++ /dev/null @@ -1,70 +0,0 @@ -%%% @doc Allocates unique ids for each node. --module(ejabberd_node_id). --export([start/0, node_id/0, node_id_to_name/1]). - - --include("mongoose.hrl"). --include("jlib.hrl"). - --type nodeid() :: non_neg_integer(). --record(node, {name :: atom(), - id :: nodeid() - }). - -start() -> - mnesia:create_table(node, - [{ram_copies, [node()]}, - {type, set}, - {attributes, record_info(fields, node)}]), - mnesia:add_table_copy(node, node(), ram_copies), - mnesia:add_table_index(node, id), - register_node(node()), - ok. - --spec register_node(atom()) -> 'ok'. -register_node(NodeName) -> - {atomic, _} = mnesia:transaction(fun() -> - case mnesia:read(node, NodeName) of - [] -> - mnesia:write(#node{name = NodeName, id = next_node_id()}); - [_] -> ok - end - end), - ok. - -%% @doc Return an integer node ID. --spec node_id() -> {ok, nodeid()}. -node_id() -> - %% Save result into the process's memory space. 
- case get(node_id) of - undefined -> - {ok, NodeId} = select_node_id(node()), - put(node_id, NodeId), - {ok, NodeId}; - NodeId -> - {ok, NodeId} - end. - -node_id_to_name(ID) -> - case mnesia:dirty_index_read(node, ID, #node.id) of - [] -> - {error, unknown_id}; - [#node{name = Name}] -> - {ok, Name} - end. - --spec next_node_id() -> nodeid(). -next_node_id() -> - max_node_id() + 1. - --spec max_node_id() -> nodeid(). -max_node_id() -> - mnesia:foldl(fun(#node{id=Id}, Max) -> max(Id, Max) end, 0, node). - --spec select_node_id(NodeName :: atom() - ) -> {'error', 'not_found'} | {'ok', nodeid()}. -select_node_id(NodeName) -> - case mnesia:dirty_read(node, NodeName) of - [#node{id=Id}] -> {ok, Id}; - [] -> {error, not_found} - end. diff --git a/src/ejabberd_sup.erl b/src/ejabberd_sup.erl index f81fc5ea940..6d8f6bf11e5 100644 --- a/src/ejabberd_sup.erl +++ b/src/ejabberd_sup.erl @@ -153,13 +153,19 @@ init([]) -> {pg, {pg, start_link, [mim_scope]}, permanent, infinity, supervisor, [pg]}, + StartIdServer = + {mongoose_start_node_id, + {mongoose_start_node_id, start_link, []}, + permanent, infinity, supervisor, [mongoose_start_node_id]}, {ok, {{one_for_one, 10, 1}, - cets_specs() ++ - [PG, + [StartIdServer, + PG, Hooks, Cleaner, SMBackendSupervisor, Router, + OutgoingPoolsSupervisor + ] ++ cets_specs() ++ [ S2S, Local, ReceiverSupervisor, @@ -167,7 +173,6 @@ init([]) -> S2SInSupervisor, S2SOutSupervisor, ServiceSupervisor, - OutgoingPoolsSupervisor, IQSupervisor, Listener, MucIQ, From 58fd64ab8fa531b32b580e4c80f6a49afa497bfa Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 21 Jun 2023 17:25:22 +0200 Subject: [PATCH 035/161] Implement mongoose_start_node_id --- src/mongoose_start_node_id.erl | 65 ++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 src/mongoose_start_node_id.erl diff --git a/src/mongoose_start_node_id.erl b/src/mongoose_start_node_id.erl new file mode 100644 index 00000000000..475bbef8538 --- /dev/null +++ 
b/src/mongoose_start_node_id.erl @@ -0,0 +1,65 @@ +%% Generates an unique ID on the node start. +%% Registers the ID on all other nodes. +%% Used in ejabberd_local to find to which node to route IQ responses. +-module(mongoose_start_node_id). +-behaviour(gen_server). + +%% API +-export([start_link/0]). +-export([node_id/0, node_id_to_name/1]). + +%% gen_server callbacks +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3]). + +-include("mongoose.hrl"). + +-type id() :: binary(). + +-record(state, {start_id :: id()}). +-define(KEY, ?MODULE). + +-spec node_id() -> id(). +node_id() -> + persistent_term:get(?KEY). + +-spec node_id_to_name(id()) -> {ok, node()} | {error, unknown_id}. +node_id_to_name(ID) -> + case persistent_term:get({?KEY, ID}, undefined) of + undefined -> + {error, unknown_id}; + Name -> + {ok, Name} + end. + +-spec start_link() -> {ok, pid()}. +start_link() -> + gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + +init(_) -> + net_kernel:monitor_nodes(true), + StartId = mongoose_bin:gen_from_crypto(), + persistent_term:put(mongoose_start_node_id, StartId), + [register_on_remote_node(RemoteNode, StartId) + || RemoteNode <- [node()|nodes()]], + {ok, #state{start_id = StartId}}. + +handle_call(_Request, _From, State) -> + {reply, ok, State}. + +handle_cast(_Msg, State) -> + {noreply, State}. + +handle_info({nodeup, RemoteNode}, State = #state{start_id = StartId}) -> + register_on_remote_node(RemoteNode, StartId), + {noreply, State}; +handle_info(_Info, State) -> + {noreply, State}. + +terminate(_Reason, _State) -> + ok. +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +register_on_remote_node(RemoteNode, StartId) -> + rpc:call(RemoteNode, persistent_term, put, [{?KEY, StartId}, node()]). 
From fd767435ce6c04beda9d16da96fa54395c05465c Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 21 Jun 2023 17:26:05 +0200 Subject: [PATCH 036/161] Make mnesia an included application --- src/mongooseim.app.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mongooseim.app.src b/src/mongooseim.app.src index 80f93c1d620..d8351364167 100644 --- a/src/mongooseim.app.src +++ b/src/mongooseim.app.src @@ -28,7 +28,6 @@ idna, kernel, lasse, - mnesia, observer_cli, pa, public_key, @@ -54,6 +53,7 @@ segmented_cache, cets ]}, + {included_applications, [mnesia]}, {env, []}, {mod, {ejabberd_app, []}}]}. From f69dfda4fb6ef5b266d39ea49a7a5284c3b6f653 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 21 Jun 2023 17:42:20 +0200 Subject: [PATCH 037/161] Still start mnesia in pgsql_cets preset --- big_tests/test.config | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/big_tests/test.config b/big_tests/test.config index 3ea5695ba62..0c4422b7f82 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -243,7 +243,8 @@ {stream_management_backend, cets}, {auth_method, "rdbms"}, {internal_databases, "[internal_databases.cets] - cluster_name = \"{{cluster_name}}\""}, + cluster_name = \"{{cluster_name}}\" +[internal_databases.mnesia]"}, %% We still using mnesia for modules that are not converted to use CETS {outgoing_pools, "[outgoing_pools.redis.global_distrib] scope = \"global\" workers = 10 From e1b7ad2e2e233a729c8b55149fa7644771296c95 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 21 Jun 2023 17:28:30 +0200 Subject: [PATCH 038/161] Add mongoose_node_num instead of ejabberd_node_id There are two backends: Mnesia or CETS disco. 
If none backends are useful, we fallback to node_id to be 0 (which is not that major issue for MAM ID collisions) --- priv/mssql2012.sql | 4 +- priv/mysql.sql | 4 +- priv/pg.sql | 4 +- src/ejabberd_app.erl | 2 +- src/mam/mod_mam_muc.erl | 2 +- src/mam/mod_mam_pm.erl | 2 +- src/mam/mod_mam_utils.erl | 14 ++-- src/mongoose_cets_discovery_rdbms.erl | 102 ++++++++++++++++---------- src/mongoose_node_num.erl | 30 ++++++++ src/mongoose_node_num_mnesia.erl | 36 +++++++++ 10 files changed, 147 insertions(+), 53 deletions(-) create mode 100644 src/mongoose_node_num.erl create mode 100644 src/mongoose_node_num_mnesia.erl diff --git a/priv/mssql2012.sql b/priv/mssql2012.sql index 6feb633d43b..9f55767fd8a 100644 --- a/priv/mssql2012.sql +++ b/priv/mssql2012.sql @@ -757,7 +757,7 @@ CREATE TABLE discovery_nodes ( node_name varchar(250), cluster_name varchar(250), updated_timestamp BIGINT NOT NULL, -- in microseconds - node_id INT UNSIGNED, + node_num INT UNSIGNED NOT NULL, PRIMARY KEY (cluster_name, node_name) ); -CREATE UNIQUE INDEX i_discovery_nodes_nodeid ON discovery_nodes(cluster_name, node_id); +CREATE UNIQUE INDEX i_discovery_nodes_node_num ON discovery_nodes(cluster_name, node_num); diff --git a/priv/mysql.sql b/priv/mysql.sql index 70826dcc5d6..392dd0df636 100644 --- a/priv/mysql.sql +++ b/priv/mysql.sql @@ -549,7 +549,7 @@ CREATE TABLE discovery_nodes ( node_name varchar(250), cluster_name varchar(250), updated_timestamp BIGINT NOT NULL, -- in microseconds - node_id INT UNSIGNED, + node_num INT UNSIGNED NOT NULL, PRIMARY KEY (cluster_name, node_name) ); -CREATE UNIQUE INDEX i_discovery_nodes_nodeid USING BTREE ON discovery_nodes(cluster_name, node_id); +CREATE UNIQUE INDEX i_discovery_nodes_node_num USING BTREE ON discovery_nodes(cluster_name, node_num); diff --git a/priv/pg.sql b/priv/pg.sql index 42ec43bf5e5..58f64ec30ae 100644 --- a/priv/pg.sql +++ b/priv/pg.sql @@ -509,7 +509,7 @@ CREATE TABLE discovery_nodes ( node_name varchar(250), cluster_name varchar(250), 
updated_timestamp BIGINT NOT NULL, -- in microseconds - node_id INT UNSIGNED, + node_num INT NOT NULL, PRIMARY KEY (cluster_name, node_name) ); -CREATE UNIQUE INDEX i_discovery_nodes_nodeid ON discovery_nodes USING BTREE(cluster_name, node_id); +CREATE UNIQUE INDEX i_discovery_nodes_node_num ON discovery_nodes USING BTREE(cluster_name, node_num); diff --git a/src/ejabberd_app.erl b/src/ejabberd_app.erl index 7f924286280..2df8af3f590 100644 --- a/src/ejabberd_app.erl +++ b/src/ejabberd_app.erl @@ -120,7 +120,7 @@ db_init() -> ok; _ -> db_init_mnesia(), - mongoose_short_number_node_id_mnesia:init() + mongoose_node_num_mnesia:init() end. db_init_mnesia() -> diff --git a/src/mam/mod_mam_muc.erl b/src/mam/mod_mam_muc.erl index f12aa587140..42f4fe49fc5 100644 --- a/src/mam/mod_mam_muc.erl +++ b/src/mam/mod_mam_muc.erl @@ -23,7 +23,7 @@ %%% %%%
    %%%
  • date (using `timestamp()');
  • -%%%
  • node number (using {@link ejabberd_node_id}).
  • +%%%
  • node number (using {@link mongoose_node_num}).
  • %%%
%%% @end %%%------------------------------------------------------------------- diff --git a/src/mam/mod_mam_pm.erl b/src/mam/mod_mam_pm.erl index 619f8763d1c..db437b7ab97 100644 --- a/src/mam/mod_mam_pm.erl +++ b/src/mam/mod_mam_pm.erl @@ -23,7 +23,7 @@ %%% %%%
    %%%
  • date (using `timestamp()');
  • -%%%
  • node number (using {@link ejabberd_node_id}).
  • +%%%
  • node number (using {@link mongoose_node_num}).
  • %%%
%%% @end %%%------------------------------------------------------------------- diff --git a/src/mam/mod_mam_utils.erl b/src/mam/mod_mam_utils.erl index 31e7494f3dd..2a8ff2c9915 100644 --- a/src/mam/mod_mam_utils.erl +++ b/src/mam/mod_mam_utils.erl @@ -179,9 +179,9 @@ get_or_generate_mam_id(Acc) -> -spec generate_message_id(integer()) -> integer(). generate_message_id(CandidateStamp) -> - {ok, NodeId} = ejabberd_node_id:node_id(), + NodeNum = mongoose_node_num:node_num(), UniqueStamp = mongoose_mam_id:next_unique(CandidateStamp), - encode_compact_uuid(UniqueStamp, NodeId). + encode_compact_uuid(UniqueStamp, NodeNum). %% @doc Create a message ID (UID). %% @@ -189,17 +189,17 @@ generate_message_id(CandidateStamp) -> %% It puts node id as a last byte. %% The maximum date, that can be encoded is `{{4253, 5, 31}, {22, 20, 37}}'. -spec encode_compact_uuid(integer(), integer()) -> integer(). -encode_compact_uuid(Microseconds, NodeId) - when is_integer(Microseconds), is_integer(NodeId) -> - (Microseconds bsl 8) + NodeId. +encode_compact_uuid(Microseconds, NodeNum) + when is_integer(Microseconds), is_integer(NodeNum) -> + (Microseconds bsl 8) + NodeNum. %% @doc Extract date and node id from a message id. -spec decode_compact_uuid(integer()) -> {integer(), byte()}. decode_compact_uuid(Id) -> Microseconds = Id bsr 8, - NodeId = Id band 255, - {Microseconds, NodeId}. + NodeNum = Id band 255, + {Microseconds, NodeNum}. %% @doc Encode a message ID to pass it to the user. diff --git a/src/mongoose_cets_discovery_rdbms.erl b/src/mongoose_cets_discovery_rdbms.erl index 106c4e2d91b..9d4083ac387 100644 --- a/src/mongoose_cets_discovery_rdbms.erl +++ b/src/mongoose_cets_discovery_rdbms.erl @@ -14,43 +14,71 @@ init(Opts = #{cluster_name := _, node_name_to_insert := _}) -> -spec get_nodes(state()) -> {cets_discovery:get_nodes_result(), state()}. 
get_nodes(State = #{cluster_name := ClusterName, node_name_to_insert := Node}) -> - prepare(), - insert(ClusterName, Node), - try mongoose_rdbms:execute_successfully(global, cets_disco_select, [ClusterName]) of - {selected, Rows} -> - Nodes = [binary_to_atom(X) || {X} <- Rows, X =/= <<>>], - {{ok, Nodes}, State} - catch Class:Reason:Stacktrace -> - ?LOG_ERROR(#{ - what => discovery_failed_select, - class => Class, - reason => Reason, - stacktrace => Stacktrace - }), - {{error, Reason}, State} + try + {Num, Nodes} = try_register(ClusterName, Node), + mongoose_node_num:set_node_num(Num), + {{ok, Nodes}, State} + catch Class:Reason:Stacktrace -> + ?LOG_ERROR(#{what => discovery_failed_select, class => Class, + reason => Reason, stacktrace => Stacktrace}), + {{error, Reason}, State} end. +try_register(ClusterName, Node) -> + prepare(), + {selected, Rows} = select(ClusterName), + Pairs = [{binary_to_atom(NodeBin), Num} || {NodeBin, Num} <- Rows], + {Nodes, Nums} = lists:unzip(Pairs), + Inserted = lists:member(Node, Nodes), + Timestamp = timestamp(), + NodeNum = + case Inserted of + true -> + update_existing(ClusterName, Node, Timestamp), + {value, {_, Num}} = lists:keysearch(Node, 1, Pairs), + Num; + false -> + Num = next_free_num(lists:usort(Nums)), + insert_new(ClusterName, Node, Timestamp, Num), + Num + end, + {NodeNum, Nodes}. + prepare() -> - Filter = [<<"node_name">>, <<"cluster_name">>], - Fields = [<<"updated_timestamp">>], - rdbms_queries:prepare_upsert(global, cets_disco_insert, discovery_nodes, - Filter ++ Fields, Fields, Filter), - mongoose_rdbms:prepare(cets_disco_select, discovery_nodes, [cluster_name], - <<"SELECT node_name FROM discovery_nodes WHERE cluster_name = ?">>). 
- -insert(ClusterName, Node) -> - Timestamp = os:system_time(microsecond), - Filter = [Node, ClusterName], - Fields = [Timestamp], - try - {updated, _} = rdbms_queries:execute_upsert(global, cets_disco_insert, - Filter ++ Fields, Fields, - Filter) - catch Class:Reason:Stacktrace -> - ?LOG_ERROR(#{ - what => discovery_failed_insert, - class => Class, - reason => Reason, - stacktrace => Stacktrace - }) - end. + T = discovery_nodes, + mongoose_rdbms:prepare(cets_disco_select, T, [cluster_name], select()), + mongoose_rdbms:prepare(cets_disco_insert_new, T, + [cluster_name, node_name, node_num, timestamp], insert_new()), + mongoose_rdbms:prepare(cets_disco_update_existing, T, + [timestamp, cluster_name, node_name], update_existing()). + +select() -> + <<"SELECT node_name, node_num FROM discovery_nodes WHERE cluster_name = ?">>. + +select(ClusterName) -> + mongoose_rdbms:execute_successfully(global, cets_disco_select, [ClusterName]). + +insert_new() -> + <<"INSERT INTO discovery_nodes (cluster_name, node_name, node_num, timestamp)" + " VALUES (?, ?, ?, ?)">>. + +insert_new(ClusterName, Node, Timestamp, Num) -> + mongoose_rdbms:execute(global, cets_disco_insert_new, [ClusterName, Node, Num, Timestamp]). + +update_existing() -> + <<"UPDATE discovery_nodes SET timestamp = ? WHERE cluster_name = ? AND node_name = ?">>. + +update_existing(ClusterName, Node, Timestamp) -> + mongoose_rdbms:execute(global, cets_disco_update_existing, [Timestamp, ClusterName, Node]). + +timestamp() -> + os:system_time(microsecond). + +%% Returns a next free node id based on the currently registered ids +next_free_num([]) -> + 0; +next_free_num([H | T = [E | _]]) when ((H + 1) =:= E) -> + %% Sequential, ignore H + next_free_num(T); +next_free_num([H | _]) -> + H + 1. 
diff --git a/src/mongoose_node_num.erl b/src/mongoose_node_num.erl new file mode 100644 index 00000000000..68c53c7d96a --- /dev/null +++ b/src/mongoose_node_num.erl @@ -0,0 +1,30 @@ +%% Returns a numeric id from 0 to 255 for the current node. +%% Used to generate MAM IDs. +-module(mongoose_node_num). +-export([set_node_num/1]). +-export([node_num/0]). + +-include("mongoose.hrl"). +-include("jlib.hrl"). +-include("mongoose_config_spec.hrl"). +-include("mongoose_logger.hrl"). + +-type node_num() :: 0..255. +-define(KEY, ?MODULE). +-export_type([node_num/0]). + +%% @doc Return an integer node ID. +-spec node_num() -> node_num(). +node_num() -> + %% We just return 0 if service is not running. + persistent_term:get(?KEY, 0). + +-spec set_node_num(node_num()) -> ignore | updated | same. +set_node_num(Num) -> + case node_num() =:= Num of + true -> + same; + false -> + persistent_term:put(?KEY, Num), + updated + end. diff --git a/src/mongoose_node_num_mnesia.erl b/src/mongoose_node_num_mnesia.erl new file mode 100644 index 00000000000..4a3f209f53a --- /dev/null +++ b/src/mongoose_node_num_mnesia.erl @@ -0,0 +1,36 @@ +-module(mongoose_node_num_mnesia). + +-export([init/0]). + +-record(node_num, {name :: atom(), + num :: mongoose_node_num:node_num() }). + +init() -> + mnesia:create_table(node_num, + [{ram_copies, [node()]}, {type, set}, + {attributes, record_info(fields, node_num)}]), + mnesia:add_table_index(node_num, num), + mnesia:add_table_copy(node_num, node(), ram_copies), + register_node(node()), + [#node_num{num = Num}] = mnesia:dirty_read(node_num, node()), + mongoose_node_num:set_node_num(Num), + ok. + +-spec register_node(atom()) -> ok. +register_node(NodeName) -> + {atomic, _} = mnesia:transaction(fun() -> + case mnesia:read(node_num, NodeName) of + [] -> + mnesia:write(#node_num{name = NodeName, num = next_node_num()}); + [_] -> ok + end + end), + ok. + +-spec next_node_num() -> mongoose_node_num:node_num(). +next_node_num() -> + max_node_num() + 1. 
+ +-spec max_node_num() -> mongoose_node_num:node_num(). +max_node_num() -> + mnesia:foldl(fun(#node_num{num = Num}, Max) -> max(Num, Max) end, 0, node_num). From 372b8fd721dab05f667b2e492f356ba736f2401e Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 21 Jun 2023 22:04:52 +0200 Subject: [PATCH 039/161] Fix xref for mongoose_start_node_id --- src/mongoose_start_node_id.erl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/mongoose_start_node_id.erl b/src/mongoose_start_node_id.erl index 475bbef8538..7f45069fa6c 100644 --- a/src/mongoose_start_node_id.erl +++ b/src/mongoose_start_node_id.erl @@ -12,6 +12,8 @@ -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). +-ignore_xref([start_link/0]). + -include("mongoose.hrl"). -type id() :: binary(). From 506a7a4c69660c63e1a1e69bc563e1948d197c48 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 22 Jun 2023 09:54:06 +0200 Subject: [PATCH 040/161] Fix updated_timestamp column name --- src/mongoose_cets_discovery_rdbms.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/mongoose_cets_discovery_rdbms.erl b/src/mongoose_cets_discovery_rdbms.erl index 9d4083ac387..50748ddef68 100644 --- a/src/mongoose_cets_discovery_rdbms.erl +++ b/src/mongoose_cets_discovery_rdbms.erl @@ -48,9 +48,9 @@ prepare() -> T = discovery_nodes, mongoose_rdbms:prepare(cets_disco_select, T, [cluster_name], select()), mongoose_rdbms:prepare(cets_disco_insert_new, T, - [cluster_name, node_name, node_num, timestamp], insert_new()), + [cluster_name, node_name, node_num, updated_timestamp], insert_new()), mongoose_rdbms:prepare(cets_disco_update_existing, T, - [timestamp, cluster_name, node_name], update_existing()). + [updated_timestamp, cluster_name, node_name], update_existing()). select() -> <<"SELECT node_name, node_num FROM discovery_nodes WHERE cluster_name = ?">>. 
@@ -59,14 +59,14 @@ select(ClusterName) -> mongoose_rdbms:execute_successfully(global, cets_disco_select, [ClusterName]). insert_new() -> - <<"INSERT INTO discovery_nodes (cluster_name, node_name, node_num, timestamp)" + <<"INSERT INTO discovery_nodes (cluster_name, node_name, node_num, updated_timestamp)" " VALUES (?, ?, ?, ?)">>. insert_new(ClusterName, Node, Timestamp, Num) -> mongoose_rdbms:execute(global, cets_disco_insert_new, [ClusterName, Node, Num, Timestamp]). update_existing() -> - <<"UPDATE discovery_nodes SET timestamp = ? WHERE cluster_name = ? AND node_name = ?">>. + <<"UPDATE discovery_nodes SET updated_timestamp = ? WHERE cluster_name = ? AND node_name = ?">>. update_existing(ClusterName, Node, Timestamp) -> mongoose_rdbms:execute(global, cets_disco_update_existing, [Timestamp, ClusterName, Node]). From 8167380742236f241fda06bf9371bcc9f033cd5f Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 22 Jun 2023 10:35:20 +0200 Subject: [PATCH 041/161] Fix cets_disco_SUITE:rdbms_backend backend We don't care if test2 is returned in a list returned on test2 node --- big_tests/tests/cets_disco_SUITE.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/big_tests/tests/cets_disco_SUITE.erl b/big_tests/tests/cets_disco_SUITE.erl index b8efd04986e..2ac117bd366 100644 --- a/big_tests/tests/cets_disco_SUITE.erl +++ b/big_tests/tests/cets_disco_SUITE.erl @@ -69,4 +69,5 @@ rdbms_backend(_Config) -> rpc(mim(), mongoose_cets_discovery_rdbms, get_nodes, [State1]), State2 = rpc(mim(), mongoose_cets_discovery_rdbms, init, [Opts2]), {{ok, Nodes}, _} = rpc(mim(), mongoose_cets_discovery_rdbms, get_nodes, [State2]), - [test1, test2] = lists:sort(Nodes). + %% "test2" node can see "test1" + lists:member(test1, Nodes). 
From e3fd670f5952070fa73ba0490fc15fd7dbeb8e77 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 22 Jun 2023 12:01:38 +0200 Subject: [PATCH 042/161] Ensure ejabberd_sup is started in mongoose_cleanup_SUITE Fixes mongoose_cleanup_SUITE:stream_management stop logic --- test/mongoose_cleanup_SUITE.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/test/mongoose_cleanup_SUITE.erl b/test/mongoose_cleanup_SUITE.erl index b15b105715f..e9907e049fa 100644 --- a/test/mongoose_cleanup_SUITE.erl +++ b/test/mongoose_cleanup_SUITE.erl @@ -32,6 +32,7 @@ all() -> ]. init_per_suite(Config) -> + mim_ct_sup:start_link(ejabberd_sup), {ok, _} = application:ensure_all_started(jid), ok = mnesia:create_schema([node()]), ok = mnesia:start(), From 393ff75e5563e281d89fd71d64b5cdb4f4a25277 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 22 Jun 2023 15:22:53 +0200 Subject: [PATCH 043/161] Fix mnesia starting in small tests --- test/mongoose_config_SUITE.erl | 2 ++ test/mongoose_listener_SUITE.erl | 1 + 2 files changed, 3 insertions(+) diff --git a/test/mongoose_config_SUITE.erl b/test/mongoose_config_SUITE.erl index d8ea28afda8..99e9447cbdb 100644 --- a/test/mongoose_config_SUITE.erl +++ b/test/mongoose_config_SUITE.erl @@ -28,6 +28,7 @@ groups() -> ]. init_per_suite(Config) -> + mnesia:start(), %% TODO Remove this call when possible (We still need it for s2s) {ok, _} = application:ensure_all_started(jid), Config. 
@@ -203,6 +204,7 @@ do_start_slave_node() -> {init_timeout, 10}, %% in seconds {startup_timeout, 10}], %% in seconds {ok, SlaveNode} = ct_slave:start(slave_name(), Opts), + rpc:call(SlaveNode, mnesia, start, []), %% TODO remove this call when possible {ok, CWD} = file:get_cwd(), ok = rpc:call(SlaveNode, file, set_cwd, [CWD]), %% Tell the remote node where to find the SUITE code diff --git a/test/mongoose_listener_SUITE.erl b/test/mongoose_listener_SUITE.erl index 34f8819147f..ecfff069188 100644 --- a/test/mongoose_listener_SUITE.erl +++ b/test/mongoose_listener_SUITE.erl @@ -29,6 +29,7 @@ end_per_testcase(_Case, Config) -> Config. init_per_suite(C) -> + mnesia:start(), C. end_per_suite(_C) -> From 22a8c888b0283f453311063a2665b836575764dc Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 22 Jun 2023 16:28:22 +0200 Subject: [PATCH 044/161] Fix mssql schema No unsigned type --- priv/mssql2012.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/priv/mssql2012.sql b/priv/mssql2012.sql index 9f55767fd8a..92df0320e53 100644 --- a/priv/mssql2012.sql +++ b/priv/mssql2012.sql @@ -757,7 +757,7 @@ CREATE TABLE discovery_nodes ( node_name varchar(250), cluster_name varchar(250), updated_timestamp BIGINT NOT NULL, -- in microseconds - node_num INT UNSIGNED NOT NULL, + node_num INT NOT NULL, PRIMARY KEY (cluster_name, node_name) ); CREATE UNIQUE INDEX i_discovery_nodes_node_num ON discovery_nodes(cluster_name, node_num); From 57456d8d9768787db5c2a59ffabc4398543238b0 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 22 Jun 2023 19:27:53 +0200 Subject: [PATCH 045/161] Add test for mongoose_cets_discovery_rdbms:next_free_num --- src/mongoose_cets_discovery_rdbms.erl | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/mongoose_cets_discovery_rdbms.erl b/src/mongoose_cets_discovery_rdbms.erl index 50748ddef68..ab8d64cc77e 100644 --- a/src/mongoose_cets_discovery_rdbms.erl +++ b/src/mongoose_cets_discovery_rdbms.erl @@ -5,6 +5,10 @@ 
-include_lib("kernel/include/logger.hrl"). +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). +-endif. + -type opts() :: #{cluster_name => binary(), node_name_to_insert => binary()}. -type state() :: opts(). @@ -82,3 +86,12 @@ next_free_num([H | T = [E | _]]) when ((H + 1) =:= E) -> next_free_num(T); next_free_num([H | _]) -> H + 1. + +-ifdef(TEST). + +jid_to_opt_binary_test_() -> + [?_assertEqual(0, next_free_num([])), + ?_assertEqual(3, next_free_num([1, 2, 5])), + ?_assertEqual(3, next_free_num([1, 2]))]. + +-endif. From 1f8bea8c0059d807a795369f3930025ec3c875f1 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 22 Jun 2023 20:06:41 +0200 Subject: [PATCH 046/161] Deregister node ids in mongoose_start_node_id when node is down --- src/mongoose_start_node_id.erl | 50 +++++++++++++++++++++++++++++++--- 1 file changed, 46 insertions(+), 4 deletions(-) diff --git a/src/mongoose_start_node_id.erl b/src/mongoose_start_node_id.erl index 7f45069fa6c..82dbd1047b9 100644 --- a/src/mongoose_start_node_id.erl +++ b/src/mongoose_start_node_id.erl @@ -7,18 +7,20 @@ %% API -export([start_link/0]). -export([node_id/0, node_id_to_name/1]). +-export([register_on_remote_node_rpc/3]). %% gen_server callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). --ignore_xref([start_link/0]). +-ignore_xref([start_link/0, register_on_remote_node_rpc/3]). -include("mongoose.hrl"). +-include("mongoose_logger.hrl"). -type id() :: binary(). --record(state, {start_id :: id()}). +-record(state, {start_id :: id(), mon_ref_to_start_id :: map()}). -define(KEY, ?MODULE). -spec node_id() -> id(). @@ -44,17 +46,44 @@ init(_) -> persistent_term:put(mongoose_start_node_id, StartId), [register_on_remote_node(RemoteNode, StartId) || RemoteNode <- [node()|nodes()]], - {ok, #state{start_id = StartId}}. + {ok, #state{start_id = StartId, mon_ref_to_start_id = #{}}}. handle_call(_Request, _From, State) -> {reply, ok, State}. 
+handle_cast({register_cleaning_task, StartId, RemotePid}, + State = #state{mon_ref_to_start_id = Mon2StartId}) -> + MonRef = erlang:monitor(process, RemotePid), + Mon2StartId2 = maps:put(MonRef, StartId, Mon2StartId), + {noreply, State#state{mon_ref_to_start_id = Mon2StartId2}}; handle_cast(_Msg, State) -> {noreply, State}. handle_info({nodeup, RemoteNode}, State = #state{start_id = StartId}) -> register_on_remote_node(RemoteNode, StartId), {noreply, State}; +handle_info({'DOWN', MonRef, process, RemotePid, Reason}, + State = #state{mon_ref_to_start_id = Mon2StartId}) -> + case maps:get(MonRef, Mon2StartId, undefined) of + undefined -> + ?LOG_ERROR(#{what => node_id_unexpected_monitor, + reason => Reason, + monitor_ref => MonRef, + remote_pid => RemotePid, + remote_node => node(RemotePid)}); + StartId -> + persistent_term:erase({?KEY, StartId}), + ?LOG_WARNING(#{what => node_id_node_down, + reason => Reason, + monitor_ref => MonRef, + remote_pid => RemotePid, + remote_node => node(RemotePid)}) + end, + %% We use pid monitors instead of node monitors to avoid cleaning + %% start id when a node is restarting and reappearing very quicky. + %% I.e. node name could be reused by a newly started node, while Refs - not. + %% Pids could be also reused, but collisions are rare. + {noreply, State#state{mon_ref_to_start_id = maps:remove(MonRef, Mon2StartId)}}; handle_info(_Info, State) -> {noreply, State}. @@ -64,4 +93,17 @@ code_change(_OldVsn, State, _Extra) -> {ok, State}. register_on_remote_node(RemoteNode, StartId) -> - rpc:call(RemoteNode, persistent_term, put, [{?KEY, StartId}, node()]). + Res = rpc:call(RemoteNode, ?MODULE, register_on_remote_node_rpc, + [node(), StartId, self()]), + case Res of + ok -> + ok; + _ -> + ?LOG_ERROR(#{what => node_id_register_on_remote_node_failed, + remote_node => RemoteNode, reason => Res}) + end. 
+ +register_on_remote_node_rpc(RemoteNode, StartId, RemotePid) -> + persistent_term:put({?KEY, StartId}, RemoteNode), + gen_server:cast(?MODULE, {register_cleaning_task, StartId, RemotePid}), + ok. From 4bff87e65f45032f979bea7c361a5d13e2e38cca Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 23 Jun 2023 11:19:16 +0200 Subject: [PATCH 047/161] Add a comment why mnesia:stop() is inside the start function --- src/ejabberd_app.erl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/ejabberd_app.erl b/src/ejabberd_app.erl index 2df8af3f590..7d77de129e6 100644 --- a/src/ejabberd_app.erl +++ b/src/ejabberd_app.erl @@ -107,6 +107,9 @@ stop(_State) -> ?LOG_NOTICE(#{what => mongooseim_node_stopped, version => ?MONGOOSE_VERSION, node => node()}), delete_pid_file(), update_status_file(stopped), + %% We cannot stop other applications inside of the stop callback + %% (because we would deadlock the application controller process). + %% That is why we call mnesia:stop() inside of db_init_mnesia() instead. %%ejabberd_debug:stop(), ok. @@ -124,6 +127,7 @@ db_init() -> end. db_init_mnesia() -> + %% Mnesia should not be running at this point, unless it is started by tests. 
%% Ensure Mnesia is stopped mnesia:stop(), case mnesia:system_info(extra_db_nodes) of From 1b25b2bf0e52683e966d28953828900489b30311 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 23 Jun 2023 12:45:12 +0200 Subject: [PATCH 048/161] Improve coverage in cets_disco --- big_tests/tests/cets_disco_SUITE.erl | 7 +++++-- src/mongoose_cets_discovery_rdbms.erl | 15 ++++++++------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/big_tests/tests/cets_disco_SUITE.erl b/big_tests/tests/cets_disco_SUITE.erl index 2ac117bd366..35e99c7ed52 100644 --- a/big_tests/tests/cets_disco_SUITE.erl +++ b/big_tests/tests/cets_disco_SUITE.erl @@ -68,6 +68,9 @@ rdbms_backend(_Config) -> State1 = rpc(mim(), mongoose_cets_discovery_rdbms, init, [Opts1]), rpc(mim(), mongoose_cets_discovery_rdbms, get_nodes, [State1]), State2 = rpc(mim(), mongoose_cets_discovery_rdbms, init, [Opts2]), - {{ok, Nodes}, _} = rpc(mim(), mongoose_cets_discovery_rdbms, get_nodes, [State2]), + {{ok, Nodes}, State2_2} = rpc(mim(), mongoose_cets_discovery_rdbms, get_nodes, [State2]), %% "test2" node can see "test1" - lists:member(test1, Nodes). + true = lists:member(test1, Nodes), + {{ok, _}, State2_3} = rpc(mim(), mongoose_cets_discovery_rdbms, get_nodes, [State2_2]), + %% Check that we follow the right code branch + #{last_query_info := #{already_registered := true}} = State2_3. diff --git a/src/mongoose_cets_discovery_rdbms.erl b/src/mongoose_cets_discovery_rdbms.erl index ab8d64cc77e..806128c7a32 100644 --- a/src/mongoose_cets_discovery_rdbms.erl +++ b/src/mongoose_cets_discovery_rdbms.erl @@ -9,19 +9,19 @@ -include_lib("eunit/include/eunit.hrl"). -endif. --type opts() :: #{cluster_name => binary(), node_name_to_insert => binary()}. +-type opts() :: #{cluster_name => binary(), node_name_to_insert => binary(), last_query_info => term()}. -type state() :: opts(). -spec init(opts()) -> state(). init(Opts = #{cluster_name := _, node_name_to_insert := _}) -> - Opts. + Opts#{last_query_info => #{}}. 
-spec get_nodes(state()) -> {cets_discovery:get_nodes_result(), state()}. get_nodes(State = #{cluster_name := ClusterName, node_name_to_insert := Node}) -> try - {Num, Nodes} = try_register(ClusterName, Node), + {Num, Nodes, Info} = try_register(ClusterName, Node), mongoose_node_num:set_node_num(Num), - {{ok, Nodes}, State} + {{ok, Nodes}, State#{last_query_info => Info}} catch Class:Reason:Stacktrace -> ?LOG_ERROR(#{what => discovery_failed_select, class => Class, reason => Reason, stacktrace => Stacktrace}), @@ -33,10 +33,10 @@ try_register(ClusterName, Node) -> {selected, Rows} = select(ClusterName), Pairs = [{binary_to_atom(NodeBin), Num} || {NodeBin, Num} <- Rows], {Nodes, Nums} = lists:unzip(Pairs), - Inserted = lists:member(Node, Nodes), + AlreadyRegistered = lists:member(Node, Nodes), Timestamp = timestamp(), NodeNum = - case Inserted of + case AlreadyRegistered of true -> update_existing(ClusterName, Node, Timestamp), {value, {_, Num}} = lists:keysearch(Node, 1, Pairs), @@ -46,7 +46,8 @@ try_register(ClusterName, Node) -> insert_new(ClusterName, Node, Timestamp, Num), Num end, - {NodeNum, Nodes}. + Info = #{already_registered => AlreadyRegistered, timestamp => Timestamp, node_num => Num}, + {NodeNum, Nodes, Info}. prepare() -> T = discovery_nodes, From 171fa01a856a8492da6ccb1c0a148144fc634f3a Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 23 Jun 2023 13:13:06 +0200 Subject: [PATCH 049/161] Add start_node_id_SUITE --- big_tests/default.spec | 1 + big_tests/dynamic_domains.spec | 1 + big_tests/tests/start_node_id_SUITE.erl | 55 +++++++++++++++++++++++++ 3 files changed, 57 insertions(+) create mode 100644 big_tests/tests/start_node_id_SUITE.erl diff --git a/big_tests/default.spec b/big_tests/default.spec index 7cd97facc0e..98b318fe308 100644 --- a/big_tests/default.spec +++ b/big_tests/default.spec @@ -117,6 +117,7 @@ {suites, "tests", local_iq_SUITE}. {suites, "tests", tcp_listener_SUITE}. {suites, "tests", cets_disco_SUITE}. 
+{suites, "tests", start_node_id_SUITE}. {config, ["test.config"]}. {logdir, "ct_report"}. diff --git a/big_tests/dynamic_domains.spec b/big_tests/dynamic_domains.spec index 8ec9e987850..8d8e1a53e1f 100644 --- a/big_tests/dynamic_domains.spec +++ b/big_tests/dynamic_domains.spec @@ -159,6 +159,7 @@ {suites, "tests", local_iq_SUITE}. {suites, "tests", tcp_listener_SUITE}. {suites, "tests", cets_disco_SUITE}. +{suites, "tests", start_node_id_SUITE}. {config, ["dynamic_domains.config", "test.config"]}. diff --git a/big_tests/tests/start_node_id_SUITE.erl b/big_tests/tests/start_node_id_SUITE.erl new file mode 100644 index 00000000000..368e43ec9c6 --- /dev/null +++ b/big_tests/tests/start_node_id_SUITE.erl @@ -0,0 +1,55 @@ +-module(start_node_id_SUITE). +-compile([export_all, nowarn_export_all]). + +-import(distributed_helper, [mim/0, rpc/4]). +-include_lib("common_test/include/ct.hrl"). + +%%-------------------------------------------------------------------- +%% Suite configuration +%%-------------------------------------------------------------------- + +all() -> + [{group, all}]. + +groups() -> + [{all, [], cases()}]. + +cases() -> + [cleaning_works]. + +suite() -> + escalus:suite(). + +%%-------------------------------------------------------------------- +%% Init & teardown +%%-------------------------------------------------------------------- +init_per_suite(Config) -> + escalus:init_per_suite(Config). + +end_per_suite(Config) -> + escalus:end_per_suite(Config). + +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. + +init_per_testcase(CaseName, Config) -> + escalus:init_per_testcase(CaseName, Config). + +end_per_testcase(CaseName, Config) -> + escalus:end_per_testcase(CaseName, Config). 
+ +%%-------------------------------------------------------------------- +%% Test cases +%%-------------------------------------------------------------------- + +cleaning_works(Config) -> + Id = <<"someid139455">>, + Pid = spawn_link(fun() -> receive stop -> ok end end), + ok = rpc(mim(), mongoose_start_node_id, register_on_remote_node_rpc, [node(), Id, Pid]), + GetF = fun() -> rpc(mim(), mongoose_start_node_id, node_id_to_name, [Id]) end, + mongoose_helper:wait_until(GetF, {ok, node()}), + Pid ! stop, + mongoose_helper:wait_until(GetF, {error, unknown_id}). From 8978847ce6fdb7e175ae4a2118fcc04cbeb7e6ab Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 23 Jun 2023 14:04:01 +0200 Subject: [PATCH 050/161] Imrove mongoose_cets_discovery_rdbms debugging info --- src/mongoose_cets_discovery_rdbms.erl | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/mongoose_cets_discovery_rdbms.erl b/src/mongoose_cets_discovery_rdbms.erl index 806128c7a32..b164ff18d08 100644 --- a/src/mongoose_cets_discovery_rdbms.erl +++ b/src/mongoose_cets_discovery_rdbms.erl @@ -28,7 +28,8 @@ get_nodes(State = #{cluster_name := ClusterName, node_name_to_insert := Node}) - {{error, Reason}, State} end. -try_register(ClusterName, Node) -> +try_register(ClusterName, NodeBin) when is_binary(NodeBin), is_binary(ClusterName) -> + Node = binary_to_atom(NodeBin), prepare(), {selected, Rows} = select(ClusterName), Pairs = [{binary_to_atom(NodeBin), Num} || {NodeBin, Num} <- Rows], @@ -38,15 +39,19 @@ try_register(ClusterName, Node) -> NodeNum = case AlreadyRegistered of true -> - update_existing(ClusterName, Node, Timestamp), + update_existing(ClusterName, NodeBin, Timestamp), {value, {_, Num}} = lists:keysearch(Node, 1, Pairs), Num; false -> Num = next_free_num(lists:usort(Nums)), - insert_new(ClusterName, Node, Timestamp, Num), + %% Could fail with duplicate node_num reason. + %% In this case just wait for the next get_nodes call. 
+ insert_new(ClusterName, NodeBin, Timestamp, Num), Num end, - Info = #{already_registered => AlreadyRegistered, timestamp => Timestamp, node_num => Num}, + %% This could be used for debugging + Info = #{already_registered => AlreadyRegistered, timestamp => Timestamp, + node_num => Num, last_rows => Rows}, {NodeNum, Nodes, Info}. prepare() -> From 2b9d616107dbc56fa8a3021de44462ada0aaac59 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 28 Jun 2023 20:49:31 +0200 Subject: [PATCH 051/161] Use lookup_opt to check if mnesia is disabled --- src/ejabberd_app.erl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/ejabberd_app.erl b/src/ejabberd_app.erl index 7d77de129e6..03fdb53d4bb 100644 --- a/src/ejabberd_app.erl +++ b/src/ejabberd_app.erl @@ -118,12 +118,12 @@ stop(_State) -> %%% Internal functions %%% db_init() -> - case mongoose_config:get_opt([internal_databases, mnesia], disabled) of - disabled -> - ok; - _ -> + case mongoose_config:lookup_opt([internal_databases, mnesia]) of + {ok, _} -> db_init_mnesia(), - mongoose_node_num_mnesia:init() + mongoose_node_num_mnesia:init(); + {error, _} -> + ok end. db_init_mnesia() -> From b6a1ddefec8a721abf026ce8007a97486ba546c7 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 28 Jun 2023 20:50:24 +0200 Subject: [PATCH 052/161] Start mnesia permanently --- src/ejabberd_app.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ejabberd_app.erl b/src/ejabberd_app.erl index 03fdb53d4bb..b375116b5dd 100644 --- a/src/ejabberd_app.erl +++ b/src/ejabberd_app.erl @@ -136,7 +136,7 @@ db_init_mnesia() -> _ -> ok end, - mnesia:start(), + application:start(mnesia, permanent), mnesia:wait_for_tables(mnesia:system_info(local_tables), infinity). -spec broadcast_c2s_shutdown_listeners() -> ok. 
From a9ab4416540365e80cfef92de8932c7945aeb453 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 29 Jun 2023 09:40:05 +0200 Subject: [PATCH 053/161] Fix typo --- src/ejabberd_sup.erl | 4 ++-- src/mongoose_cets_discovery_rdbms.erl | 2 +- src/mongoose_start_node_id.erl | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/ejabberd_sup.erl b/src/ejabberd_sup.erl index 6d8f6bf11e5..0e84c454be3 100644 --- a/src/ejabberd_sup.erl +++ b/src/ejabberd_sup.erl @@ -152,11 +152,11 @@ init([]) -> PG = {pg, {pg, start_link, [mim_scope]}, - permanent, infinity, supervisor, [pg]}, + permanent, infinity, worker, [pg]}, StartIdServer = {mongoose_start_node_id, {mongoose_start_node_id, start_link, []}, - permanent, infinity, supervisor, [mongoose_start_node_id]}, + permanent, infinity, worker, [mongoose_start_node_id]}, {ok, {{one_for_one, 10, 1}, [StartIdServer, PG, diff --git a/src/mongoose_cets_discovery_rdbms.erl b/src/mongoose_cets_discovery_rdbms.erl index b164ff18d08..fea3b287995 100644 --- a/src/mongoose_cets_discovery_rdbms.erl +++ b/src/mongoose_cets_discovery_rdbms.erl @@ -9,7 +9,7 @@ -include_lib("eunit/include/eunit.hrl"). -endif. --type opts() :: #{cluster_name => binary(), node_name_to_insert => binary(), last_query_info => term()}. +-type opts() :: #{cluster_name => binary(), node_name_to_insert => binary(), last_query_info => map()}. -type state() :: opts(). -spec init(opts()) -> state(). diff --git a/src/mongoose_start_node_id.erl b/src/mongoose_start_node_id.erl index 82dbd1047b9..85be91312a3 100644 --- a/src/mongoose_start_node_id.erl +++ b/src/mongoose_start_node_id.erl @@ -1,4 +1,4 @@ -%% Generates an unique ID on the node start. +%% Generates a unique ID on the node start. %% Registers the ID on all other nodes. %% Used in ejabberd_local to find to which node to route IQ responses. -module(mongoose_start_node_id). 
From 80077e2d40a9ea010777fb7326e7b18744105b19 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 29 Jun 2023 09:43:33 +0200 Subject: [PATCH 054/161] Moved mongoose_node_num:set_node_num away from try..catch --- src/mongoose_cets_discovery_rdbms.erl | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/mongoose_cets_discovery_rdbms.erl b/src/mongoose_cets_discovery_rdbms.erl index fea3b287995..9ddaa061864 100644 --- a/src/mongoose_cets_discovery_rdbms.erl +++ b/src/mongoose_cets_discovery_rdbms.erl @@ -19,9 +19,11 @@ init(Opts = #{cluster_name := _, node_name_to_insert := _}) -> -spec get_nodes(state()) -> {cets_discovery:get_nodes_result(), state()}. get_nodes(State = #{cluster_name := ClusterName, node_name_to_insert := Node}) -> try - {Num, Nodes, Info} = try_register(ClusterName, Node), - mongoose_node_num:set_node_num(Num), - {{ok, Nodes}, State#{last_query_info => Info}} + try_register(ClusterName, Node) + of + {Num, Nodes, Info} -> + mongoose_node_num:set_node_num(Num), + {{ok, Nodes}, State#{last_query_info => Info}} catch Class:Reason:Stacktrace -> ?LOG_ERROR(#{what => discovery_failed_select, class => Class, reason => Reason, stacktrace => Stacktrace}), From b83dc98e382138f7caf4ffea00cf35e9d9e45169 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 29 Jun 2023 09:44:17 +0200 Subject: [PATCH 055/161] Fix shadowed variable NodeBin --- src/mongoose_cets_discovery_rdbms.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mongoose_cets_discovery_rdbms.erl b/src/mongoose_cets_discovery_rdbms.erl index 9ddaa061864..87b81371e77 100644 --- a/src/mongoose_cets_discovery_rdbms.erl +++ b/src/mongoose_cets_discovery_rdbms.erl @@ -34,7 +34,7 @@ try_register(ClusterName, NodeBin) when is_binary(NodeBin), is_binary(ClusterNam Node = binary_to_atom(NodeBin), prepare(), {selected, Rows} = select(ClusterName), - Pairs = [{binary_to_atom(NodeBin), Num} || {NodeBin, Num} <- Rows], + Pairs = [{binary_to_atom(DbNodeBin), 
Num} || {DbNodeBin, Num} <- Rows], {Nodes, Nums} = lists:unzip(Pairs), AlreadyRegistered = lists:member(Node, Nodes), Timestamp = timestamp(), From 57a29b5810edd749ca2c4dfad6b0e092b22aeaad Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 30 Jun 2023 20:19:26 +0200 Subject: [PATCH 056/161] Add mod_bosh_cets --- big_tests/test.config | 1 + rel/files/mongooseim.toml | 3 ++ src/mod_bosh.erl | 10 +++++-- src/mod_bosh_cets.erl | 58 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 69 insertions(+), 3 deletions(-) create mode 100644 src/mod_bosh_cets.erl diff --git a/big_tests/test.config b/big_tests/test.config index 0c4422b7f82..7464c642094 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -240,6 +240,7 @@ {pgsql_cets, [{dbs, [redis, pgsql]}, {sm_backend, "\"cets\""}, + {bosh_backend, "\"cets\""}, {stream_management_backend, cets}, {auth_method, "rdbms"}, {internal_databases, "[internal_databases.cets] diff --git a/rel/files/mongooseim.toml b/rel/files/mongooseim.toml index fea8ca43c63..ed4ca9e0b1a 100644 --- a/rel/files/mongooseim.toml +++ b/rel/files/mongooseim.toml @@ -246,6 +246,9 @@ {{{mod_vcard}}} {{/mod_vcard}} [modules.mod_bosh] + {{#bosh_backend}} + backend = {{{bosh_backend}}} + {{/bosh_backend}} [modules.mod_carboncopy] diff --git a/src/mod_bosh.erl b/src/mod_bosh.erl index 754a652c928..b8ab9fe14fe 100644 --- a/src/mod_bosh.erl +++ b/src/mod_bosh.erl @@ -341,10 +341,10 @@ maybe_start_session_on_known_host(HostType, Req, Body, Opts) -> try maybe_start_session_on_known_host_unsafe(HostType, Req, Body, Opts) catch - error:Reason -> + error:Reason:Stacktrace -> %% It's here because something catch-y was here before ?LOG_ERROR(#{what => bosh_stop, issue => undefined_condition, - reason => Reason}), + reason => Reason, stacktrace => Stacktrace}), Req1 = terminal_condition(<<"undefined-condition">>, [], Req), {false, Req1} end. 
@@ -373,9 +373,13 @@ start_session(HostType, Peer, PeerCert, Body, Opts) -> store_session(Sid, Socket) -> mod_bosh_backend:create_session(#bosh_session{sid = Sid, socket = Socket}). +%% MUST be unique and unpredictable +%% https://xmpp.org/extensions/xep-0124.html#security-sidrid +%% Also, CETS requires to use node as a part of the key +%% (but if the key is always random CETS is happy with that too) -spec make_sid() -> binary(). make_sid() -> - mongoose_bin:encode_crypto(term_to_binary(make_ref())). + base16:encode(crypto:strong_rand_bytes(20)). %%-------------------------------------------------------------------- %% HTTP errors diff --git a/src/mod_bosh_cets.erl b/src/mod_bosh_cets.erl new file mode 100644 index 00000000000..bea57e3fdbf --- /dev/null +++ b/src/mod_bosh_cets.erl @@ -0,0 +1,58 @@ +-module(mod_bosh_cets). + +-behaviour(mod_bosh_backend). + +%% mod_bosh_backend callbacks +-export([start/0, + create_session/1, + delete_session/1, + get_session/1, + get_sessions/0, + node_cleanup/1]). + +-include("mod_bosh.hrl"). + +-define(TABLE, cets_bosh). + +-spec start() -> any(). +start() -> + cets:start(?TABLE, #{}), + cets_discovery:add_table(mongoose_cets_discovery, ?TABLE). + +%% Session key (sid) is unique, so we don't expect conflicts +%% So, the confict resolution could be avoided +-spec create_session(mod_bosh:session()) -> any(). +create_session(Session) -> + cets:insert(?TABLE, session_to_tuple(Session)). + +-spec delete_session(mod_bosh:sid()) -> any(). +delete_session(Sid) -> + cets:delete(?TABLE, Sid). + +-spec get_session(mod_bosh:sid()) -> [mod_bosh:session()]. +get_session(Sid) -> + tuples_to_records(ets:lookup(?TABLE, Sid)). + +-spec get_sessions() -> [mod_bosh:session()]. +get_sessions() -> + tuples_to_records(ets:tab2list(?TABLE)). + +-spec node_cleanup(atom()) -> any(). 
+node_cleanup(Node) -> + Guard = {'==', {node, '$1'}, Node}, + R = {'_', '$1'}, + cets:sync(?TABLE), + %% We don't need to replicate deletes + %% We remove the local content here + ets:select_delete(?TABLE, [{R, [Guard], [true]}]), + ok. + +%% Simple format conversion +session_to_tuple(#bosh_session{sid = Sid, socket = Pid}) when is_pid(Pid) -> + {Sid, Pid}. + +tuples_to_records(Tuples) -> + [tuple_to_record(Tuple) || Tuple <- Tuples]. + +tuple_to_record({Sid, Pid}) -> + #bosh_session{sid = Sid, socket = Pid}. From dee22c43e41654ce0e656e82192521b3695b1275 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 10 Jul 2023 12:52:49 +0200 Subject: [PATCH 057/161] Update docs --- doc/modules/mod_bosh.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/mod_bosh.md b/doc/modules/mod_bosh.md index d04aadab07c..935a56e4014 100644 --- a/doc/modules/mod_bosh.md +++ b/doc/modules/mod_bosh.md @@ -13,7 +13,7 @@ If you want to use BOSH, you must enable it both in the `listen` section of * **Default:** `"mnesia"` * **Example:** `backend = "mnesia"` -Backend to use for storing BOSH connections. Currently only `"mnesia"` is supported. +Backend to use for storing BOSH connections. `"cets"`, `"mnesia"` are supported. 
### `modules.mod_bosh.inactivity` * **Syntax:** positive integer or the string `"infinity"` From 72975d115b20e471afcc9a26b31177745a31365c Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 10 Jul 2023 12:58:00 +0200 Subject: [PATCH 058/161] Use records in mod_bosh_cets --- rebar.lock | 2 +- src/mod_bosh_cets.erl | 20 +++++--------------- 2 files changed, 6 insertions(+), 16 deletions(-) diff --git a/rebar.lock b/rebar.lock index 12f14278b9d..b74bc09a396 100644 --- a/rebar.lock +++ b/rebar.lock @@ -8,7 +8,7 @@ {<<"certifi">>,{pkg,<<"certifi">>,<<"2.9.0">>},1}, {<<"cets">>, {git,"https://github.com/esl/cets.git", - {ref,"351221c7a2f2c64f7ebc163428f8d340b71705ac"}}, + {ref,"c4f47edbe1bc7d467986c8e9dca56991c14f6a77"}}, 0}, {<<"cowboy">>,{pkg,<<"cowboy">>,<<"2.9.0">>},0}, {<<"cowboy_swagger">>,{pkg,<<"cowboy_swagger">>,<<"2.5.1">>},0}, diff --git a/src/mod_bosh_cets.erl b/src/mod_bosh_cets.erl index bea57e3fdbf..18b338c656e 100644 --- a/src/mod_bosh_cets.erl +++ b/src/mod_bosh_cets.erl @@ -16,14 +16,14 @@ -spec start() -> any(). start() -> - cets:start(?TABLE, #{}), + cets:start(?TABLE, #{keypos => 2}), cets_discovery:add_table(mongoose_cets_discovery, ?TABLE). %% Session key (sid) is unique, so we don't expect conflicts %% So, the confict resolution could be avoided -spec create_session(mod_bosh:session()) -> any(). create_session(Session) -> - cets:insert(?TABLE, session_to_tuple(Session)). + cets:insert(?TABLE, Session). -spec delete_session(mod_bosh:sid()) -> any(). delete_session(Sid) -> @@ -31,28 +31,18 @@ delete_session(Sid) -> -spec get_session(mod_bosh:sid()) -> [mod_bosh:session()]. get_session(Sid) -> - tuples_to_records(ets:lookup(?TABLE, Sid)). + ets:lookup(?TABLE, Sid). -spec get_sessions() -> [mod_bosh:session()]. get_sessions() -> - tuples_to_records(ets:tab2list(?TABLE)). + ets:tab2list(?TABLE). -spec node_cleanup(atom()) -> any(). 
node_cleanup(Node) -> Guard = {'==', {node, '$1'}, Node}, - R = {'_', '$1'}, + R = {'_', '_', '$1'}, cets:sync(?TABLE), %% We don't need to replicate deletes %% We remove the local content here ets:select_delete(?TABLE, [{R, [Guard], [true]}]), ok. - -%% Simple format conversion -session_to_tuple(#bosh_session{sid = Sid, socket = Pid}) when is_pid(Pid) -> - {Sid, Pid}. - -tuples_to_records(Tuples) -> - [tuple_to_record(Tuple) || Tuple <- Tuples]. - -tuple_to_record({Sid, Pid}) -> - #bosh_session{sid = Sid, socket = Pid}. From 46fe85a55baa2c376f4a7c673dfa3f34a142c3c0 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 30 Jun 2023 01:09:18 +0200 Subject: [PATCH 059/161] Move code into mongoose_component Make mongoose_component_backend --- big_tests/tests/component_helper.erl | 2 +- src/component/mongoose_component.erl | 171 ++++++++++++ src/component/mongoose_component_backend.erl | 47 ++++ src/component/mongoose_component_mnesia.erl | 68 +++++ src/config/mongoose_config_spec.erl | 4 + src/ejabberd_router.erl | 253 +----------------- src/ejabberd_service.erl | 20 +- src/mod_disco.erl | 4 +- src/mongoose_router_external.erl | 2 +- src/mongoose_router_external_localnode.erl | 2 +- .../mongoose_system_metrics_collector.erl | 4 +- test/component_reg_SUITE.erl | 42 +-- test/router_SUITE.erl | 55 +--- 13 files changed, 336 insertions(+), 338 deletions(-) create mode 100644 src/component/mongoose_component.erl create mode 100644 src/component/mongoose_component_backend.erl create mode 100644 src/component/mongoose_component_mnesia.erl diff --git a/big_tests/tests/component_helper.erl b/big_tests/tests/component_helper.erl index 05d0fa04a7e..2a4f450c678 100644 --- a/big_tests/tests/component_helper.erl +++ b/big_tests/tests/component_helper.erl @@ -45,7 +45,7 @@ disconnect_component(Component, Addr) -> disconnect_components(Components, Addr) -> %% TODO replace 'kill' with 'stop' when server supports stream closing [escalus_connection:kill(Component) || Component <- 
Components], - mongoose_helper:wait_until(fun() -> rpc(ejabberd_router, lookup_component, [Addr]) =:= [] end, true, + mongoose_helper:wait_until(fun() -> rpc(mongoose_component, lookup_component, [Addr]) =:= [] end, true, #{name => rpc}). rpc(M, F, A) -> diff --git a/src/component/mongoose_component.erl b/src/component/mongoose_component.erl new file mode 100644 index 00000000000..54bc1e584e6 --- /dev/null +++ b/src/component/mongoose_component.erl @@ -0,0 +1,171 @@ +-module(mongoose_component). +%% API +-export([has_component/1, + dirty_get_all_components/1, + register_components/4, + unregister_components/1, + lookup_component/1, + lookup_component/2]). + +-export([start/0, stop/0]). +-export([node_cleanup/3]). + +-include("mongoose.hrl"). +-include("jlib.hrl"). +-include("external_component.hrl"). + +-type domain() :: jid:server(). + +-type external_component() :: #external_component{domain :: domain(), + handler :: mongoose_packet_handler:t(), + is_hidden :: boolean()}. + +-export_type([external_component/0]). + +% Not simple boolean() because is probably going to support third value in the future: only_hidden. +% Besides, it increases readability. +-type return_hidden() :: only_public | all. + +-export_type([return_hidden/0]). + +%%==================================================================== +%% API +%%==================================================================== + +start() -> + Backend = mongoose_config:get_opt(component_backend), + mongoose_component_backend:init(#{backend => Backend}), + gen_hook:delete_handlers(hooks()). + +stop() -> + gen_hook:add_handlers(hooks()). + +-spec hooks() -> [gen_hook:hook_tuple()]. +hooks() -> + [{node_cleanup, global, fun ?MODULE:node_cleanup/3, #{}, 90}]. + +-spec register_components([Domain :: domain()], + Node :: node(), + Handler :: mongoose_packet_handler:t(), + AreHidden :: boolean()) -> {ok, [external_component()]} | {error, any()}. 
+register_components(Domains, Node, Handler, AreHidden) -> + try + register_components_unsafe(Domains, Node, Handler, AreHidden) + catch Class:Reason:Stacktrace -> + ?LOG_ERROR(#{what => component_register_failed, + class => Class, reason => Reason, stacktrace => Stacktrace}), + {error, Reason} + end. + +register_components_unsafe(Domains, Node, Handler, AreHidden) -> + LDomains = prepare_ldomains(Domains), + Components = make_components(LDomains, Node, Handler, AreHidden), + assert_can_register_components(Components), + register_components(Components), + %% We do it outside of Mnesia transaction + lists:foreach(fun run_register_hook/1, Components), + {ok, Components}. + +register_components(Components) -> + mongoose_component_backend:register_components(Components). + +make_components(LDomains, Node, Handler, AreHidden) -> + [make_record_component(LDomain, Handler, Node, AreHidden) || LDomain <- LDomains]. + +make_record_component(LDomain, Handler, Node, IsHidden) -> + #external_component{domain = LDomain, handler = Handler, + node = Node, is_hidden = IsHidden}. + +run_register_hook(#external_component{domain = LDomain, is_hidden = IsHidden}) -> + mongoose_hooks:register_subhost(LDomain, IsHidden), + ok. + +run_unregister_hook(#external_component{domain = LDomain}) -> + mongoose_hooks:unregister_subhost(LDomain), + ok. + +-spec unregister_components(Components :: [external_component()]) -> ok. +unregister_components(Components) -> + lists:foreach(fun run_unregister_hook/1, Components), + mongoose_component_backend:unregister_components(Components). + +assert_can_register_components(Components) -> + Checks = lists:map(fun is_already_registered/1, Components), + Zip = lists:zip(Components, Checks), + ConfictDomains = + [LDomain || {#external_component{domain = LDomain}, true} <- Zip], + case ConfictDomains of + [] -> + ok; + _ -> + error({routes_already_exist, ConfictDomains}) + end. + +%% Returns true if any component route is registered for the domain. 
+-spec has_component(jid:lserver()) -> boolean(). +has_component(Domain) -> + [] =/= lookup_component(Domain). + +%% @doc Check if the component/route is already registered somewhere. +-spec is_already_registered(external_component()) -> boolean(). +is_already_registered(#external_component{domain = LDomain, node = Node}) -> + has_dynamic_domains(LDomain) + orelse has_domain_route(LDomain) + orelse has_component_registered(LDomain, Node). + +has_dynamic_domains(LDomain) -> + {error, not_found} =/= mongoose_domain_api:get_host_type(LDomain). + +%% check that route for this domain is not already registered +has_domain_route(LDomain) -> + no_route =/= mongoose_router:lookup_route(LDomain). + +%% check that there is no component registered globally for this node +has_component_registered(LDomain, Node) -> + no_route =/= get_component(LDomain, Node). + +%% Find a component registered globally for this node (internal use) +get_component(LDomain, Node) -> + filter_component(lookup_component(LDomain), Node). + +filter_component([], _) -> + no_route; +filter_component([Comp|Tail], Node) -> + case Comp of + #external_component{node = Node} -> + Comp; + _ -> + filter_component(Tail, Node) + end. + +%% @doc Returns a list of components registered for this domain by any node, +%% the choice is yours. +-spec lookup_component(Domain :: jid:lserver()) -> [external_component()]. +lookup_component(Domain) -> + mongoose_component_backend:lookup_component(Domain). + +%% @doc Returns a list of components registered for this domain at the given node. +%% (must be only one, or nothing) +-spec lookup_component(Domain :: jid:lserver(), Node :: node()) -> [external_component()]. +lookup_component(Domain, Node) -> + mongoose_component_backend:lookup_component(Domain, Node). + +-spec dirty_get_all_components(return_hidden()) -> [jid:lserver()]. +dirty_get_all_components(ReturnHidden) -> + mongoose_component_backend:get_all_components(ReturnHidden). 
+ +-spec node_cleanup(map(), map(), map()) -> {ok, map()}. +node_cleanup(Acc, #{node := Node}, _) -> + mongoose_component_backend:node_cleanup(Node), + {ok, maps:put(?MODULE, ok, Acc)}. + +prepare_ldomains(Domains) -> + LDomains = [jid:nameprep(Domain) || Domain <- Domains], + Zip = lists:zip(Domains, LDomains), + InvalidDomains = [Domain || {Domain, error} <- Zip], + case InvalidDomains of + [] -> + LDomains; + _ -> + error({invalid_domains, InvalidDomains}) + end. diff --git a/src/component/mongoose_component_backend.erl b/src/component/mongoose_component_backend.erl new file mode 100644 index 00000000000..e26ceeeda3f --- /dev/null +++ b/src/component/mongoose_component_backend.erl @@ -0,0 +1,47 @@ +-module(mongoose_component_backend). + +-callback init(map()) -> any(). + +-callback node_cleanup(node()) -> ok. + +-export([init/1, + node_cleanup/1, + register_components/1, + unregister_components/1, + lookup_component/1, + lookup_component/2, + get_all_components/1]). + +-ignore_xref([behaviour_info/1]). + +-define(MAIN_MODULE, mongoose_component). + +-spec init(map()) -> any(). +init(Opts) -> + Args = [Opts], + mongoose_backend:init(global, ?MAIN_MODULE, [], Opts), + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +node_cleanup(Node) -> + Args = [Node], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +register_components(Components) -> + Args = [Components], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +unregister_components(Components) -> + Args = [Components], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +lookup_component(Domain) -> + Args = [Domain], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +lookup_component(Domain, Node) -> + Args = [Domain, Node], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). 
+ +get_all_components(ReturnHidden) -> + Args = [ReturnHidden], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). diff --git a/src/component/mongoose_component_mnesia.erl b/src/component/mongoose_component_mnesia.erl new file mode 100644 index 00000000000..3e5d7621c48 --- /dev/null +++ b/src/component/mongoose_component_mnesia.erl @@ -0,0 +1,68 @@ +-module(mongoose_component_mnesia). +-behaviour(mongoose_component_backend). + +-export([init/1, + node_cleanup/1, + register_components/1, + unregister_components/1, + lookup_component/1, + lookup_component/2, + get_all_components/1]). + +-include("external_component.hrl"). + +init(_) -> + update_tables(), + %% add distributed service_component routes + mnesia:create_table(external_component, + [{attributes, record_info(fields, external_component)}, + {type, bag}, {ram_copies, [node()]}]), + mnesia:add_table_copy(external_component, node(), ram_copies). + +update_tables() -> + case catch mnesia:table_info(external_component, attributes) of + [domain, handler, node] -> + mnesia:delete_table(external_component); + [domain, handler, node, is_hidden] -> + ok; + {'EXIT', _} -> + ok + end. + +node_cleanup(Node) -> + Entries = mnesia:dirty_match_object(external_component, + #external_component{node = Node, _ = '_'}), + [mnesia:dirty_delete_object(external_component, Entry) || Entry <- Entries], + ok. + +register_components(Components) -> + F = fun() -> + lists:foreach(fun mnesia:write/1, Components) + end, + case mnesia:transaction(F) of + {atomic, ok} -> ok; + {aborted, Reason} -> error({mnesia_aborted_write, Reason}) + end. + +unregister_components(Components) -> + F = fun() -> + lists:foreach(fun do_unregister_component/1, Components) + end, + {atomic, ok} = mnesia:transaction(F), + ok. + +do_unregister_component(Component) -> + ok = mnesia:delete_object(external_component, Component, write). + +lookup_component(Domain) -> + mnesia:dirty_read(external_component, Domain). 
+ +lookup_component(Domain, Node) -> + mnesia:dirty_match_object(external_component, + #external_component{domain = Domain, node = Node, _ = '_'}). + +get_all_components(all) -> + mnesia:dirty_all_keys(external_component); +get_all_components(only_public) -> + MatchNonHidden = {#external_component{ domain = '$1', is_hidden = false, _ = '_' }, [], ['$1']}, + mnesia:dirty_select(external_component, [MatchNonHidden]). diff --git a/src/config/mongoose_config_spec.erl b/src/config/mongoose_config_spec.erl index 34e5e615291..40b88044e69 100644 --- a/src/config/mongoose_config_spec.erl +++ b/src/config/mongoose_config_spec.erl @@ -172,6 +172,9 @@ general() -> <<"sm_backend">> => #option{type = atom, validate = {module, ejabberd_sm}, wrap = global_config}, + <<"component_backend">> => #option{type = atom, + validate = {module, mongoose_component}, + wrap = global_config}, <<"max_fsm_queue">> => #option{type = integer, validate = positive, wrap = global_config}, @@ -210,6 +213,7 @@ general_defaults() -> <<"language">> => <<"en">>, <<"all_metrics_are_global">> => false, <<"sm_backend">> => mnesia, + <<"component_backend">> => mnesia, <<"rdbms_server_type">> => generic, <<"mongooseimctl_access_commands">> => #{}, <<"routing_modules">> => mongoose_router:default_routing_modules(), diff --git a/src/ejabberd_router.erl b/src/ejabberd_router.erl index 6abaa02a7fe..41b17072a77 100644 --- a/src/ejabberd_router.erl +++ b/src/ejabberd_router.erl @@ -31,55 +31,20 @@ -export([route/3, route/4, route_error/4, - route_error_reply/4, - is_component_dirty/1, - dirty_get_all_components/1, - register_components/2, - register_components/3, - register_components/4, - register_component/2, - register_component/3, - register_component/4, - lookup_component/1, - lookup_component/2, - unregister_component/1, - unregister_component/2, - unregister_components/1, - unregister_components/2 - ]). + route_error_reply/4]). -export([start_link/0]). --export([routes_cleanup_on_nodedown/3]). 
%% gen_server callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). -%% debug exports for tests --export([update_tables/0]). - --ignore_xref([register_component/2, register_component/3, register_component/4, - register_components/2, register_components/3, route_error/4, start_link/0, - unregister_component/1, unregister_component/2, unregister_components/2, - unregister_routes/1, update_tables/0]). +-ignore_xref([route_error/4, start_link/0]). -include("mongoose.hrl"). -include("jlib.hrl"). --include("external_component.hrl"). -record(state, {}). --type domain() :: binary(). - --type external_component() :: #external_component{domain :: domain(), - handler :: mongoose_packet_handler:t(), - is_hidden :: boolean()}. - -% Not simple boolean() because is probably going to support third value in the future: only_hidden. -% Besides, it increases readability. --type return_hidden() :: only_public | all. - --export_type([return_hidden/0]). - %%==================================================================== %% API %%==================================================================== @@ -164,191 +129,13 @@ route_error_reply(From, To, Acc, Error) -> {Acc1, ErrorReply} = jlib:make_error_reply(Acc, Error), route_error(From, To, Acc1, ErrorReply). - --spec register_components([Domain :: domain()], - Handler :: mongoose_packet_handler:t()) -> ok | {error, any()}. -register_components(Domains, Handler) -> - register_components(Domains, node(), Handler). - --spec register_components([Domain :: domain()], - Node :: node(), - Handler :: mongoose_packet_handler:t()) -> ok | {error, any()}. -register_components(Domains, Node, Handler) -> - register_components(Domains, Node, Handler, false). - --spec register_components([Domain :: domain()], - Node :: node(), - Handler :: mongoose_packet_handler:t(), - AreHidden :: boolean()) -> ok | {error, any()}. 
-register_components(Domains, Node, Handler, AreHidden) -> - LDomains = [{jid:nameprep(Domain), Domain} || Domain <- Domains], - F = fun() -> - [do_register_component(LDomain, Handler, Node, AreHidden) || LDomain <- LDomains], - ok - end, - case mnesia:transaction(F) of - {atomic, ok} -> ok; - {aborted, Reason} -> {error, Reason} - end. - -%% @doc -%% components are registered in two places: external_components table as local components -%% and external_components_global as globals. Registration should be done by register_component/1 -%% or register_components/1, which registers them for current node; the arity 2 funcs are -%% here for testing. --spec register_component(Domain :: domain(), - Handler :: mongoose_packet_handler:t()) -> ok | {error, any()}. -register_component(Domain, Handler) -> - register_component(Domain, node(), Handler). - --spec register_component(Domain :: domain(), - Node :: node(), - Handler :: mongoose_packet_handler:t()) -> ok | {error, any()}. -register_component(Domain, Node, Handler) -> - register_component(Domain, Node, Handler, false). - --spec register_component(Domain :: domain(), - Node :: node(), - Handler :: mongoose_packet_handler:t(), - IsHidden :: boolean()) -> ok | {error, any()}. -register_component(Domain, Node, Handler, IsHidden) -> - register_components([Domain], Node, Handler, IsHidden). 
- -do_register_component({error, Domain}, _Handler, _Node, _IsHidden) -> - error({invalid_domain, Domain}); -do_register_component({LDomain, _}, Handler, Node, IsHidden) -> - case check_component(LDomain, Node) of - ok -> - ComponentGlobal = #external_component{domain = LDomain, handler = Handler, - node = Node, is_hidden = IsHidden}, - mnesia:write(external_component_global, ComponentGlobal, write), - NDomain = {LDomain, Node}, - Component = #external_component{domain = NDomain, handler = Handler, - node = Node, is_hidden = IsHidden}, - mnesia:write(Component), - mongoose_hooks:register_subhost(LDomain, IsHidden); - _ -> mnesia:abort(route_already_exists) - end. - -%% @doc Check if the component/route is already registered somewhere; ok means it is not, so we are -%% ok to proceed, anything else means the domain/node pair is already serviced. -%% true and false are there because that's how orelse works. --spec check_component(binary(), Node :: node()) -> ok | error. -check_component(LDomain, Node) -> - case check_dynamic_domains(LDomain) - orelse check_component_route(LDomain) - orelse check_component_local(LDomain, Node) - orelse check_component_global(LDomain, Node) of - true -> error; - false -> ok - end. - -check_dynamic_domains(LDomain)-> - {error, not_found} =/= mongoose_domain_api:get_host_type(LDomain). - -%% check that route for this domain is not already registered -check_component_route(LDomain) -> - no_route =/= mongoose_router:lookup_route(LDomain). - -%% check that there is no local component for domain:node pair -check_component_local(LDomain, Node) -> - NDomain = {LDomain, Node}, - [] =/= mnesia:read(external_component, NDomain). - -%% check that there is no component registered globally for this node -check_component_global(LDomain, Node) -> - undefined =/= get_global_component(LDomain, Node). 
- -%% Find a component registered globally for this node (internal use) -get_global_component([], _) -> - undefined; -get_global_component([Comp|Tail], Node) -> - case Comp of - #external_component{node = Node} -> - Comp; - _ -> - get_global_component(Tail, Node) - end; -get_global_component(LDomain, Node) -> - get_global_component(mnesia:read(external_component_global, LDomain), Node). - - --spec unregister_components([Domains :: domain()]) -> {atomic, ok}. -unregister_components(Domains) -> - unregister_components(Domains, node()). --spec unregister_components([Domains :: domain()], Node :: node()) -> {atomic, ok}. -unregister_components(Domains, Node) -> - LDomains = [{jid:nameprep(Domain), Domain} || Domain <- Domains], - F = fun() -> - [do_unregister_component(LDomain, Node) || LDomain <- LDomains], - ok - end, - {atomic, ok} = mnesia:transaction(F). - -do_unregister_component({error, Domain}, _Node) -> - error({invalid_domain, Domain}); -do_unregister_component({LDomain, _}, Node) -> - case get_global_component(LDomain, Node) of - undefined -> - ok; - Comp -> - ok = mnesia:delete_object(external_component_global, Comp, write) - end, - ok = mnesia:delete({external_component, {LDomain, Node}}), - mongoose_hooks:unregister_subhost(LDomain), - ok. - --spec unregister_component(Domain :: domain()) -> {atomic, ok}. -unregister_component(Domain) -> - unregister_components([Domain]). - --spec unregister_component(Domain :: domain(), Node :: node()) -> {atomic, ok}. -unregister_component(Domain, Node) -> - unregister_components([Domain], Node). - -%% @doc Returns a list of components registered for this domain by any node, -%% the choice is yours. --spec lookup_component(Domain :: jid:lserver()) -> [external_component()]. -lookup_component(Domain) -> - mnesia:dirty_read(external_component_global, Domain). - -%% @doc Returns a list of components registered for this domain at the given node. 
-%% (must be only one, or nothing) --spec lookup_component(Domain :: jid:lserver(), Node :: node()) -> [external_component()]. -lookup_component(Domain, Node) -> - mnesia:dirty_read(external_component, {Domain, Node}). - --spec dirty_get_all_components(return_hidden()) -> [jid:lserver()]. -dirty_get_all_components(all) -> - mnesia:dirty_all_keys(external_component_global); -dirty_get_all_components(only_public) -> - MatchNonHidden = {#external_component{ domain = '$1', is_hidden = false, _ = '_' }, [], ['$1']}, - mnesia:dirty_select(external_component_global, [MatchNonHidden]). - --spec is_component_dirty(jid:lserver()) -> boolean(). -is_component_dirty(Domain) -> - [] =/= lookup_component(Domain). - %%==================================================================== %% gen_server callbacks %%==================================================================== init([]) -> - update_tables(), - - %% add distributed service_component routes - mnesia:create_table(external_component, - [{attributes, record_info(fields, external_component)}, - {local_content, true}]), - mnesia:add_table_copy(external_component, node(), ram_copies), - mnesia:create_table(external_component_global, - [{attributes, record_info(fields, external_component)}, - {type, bag}, - {record_name, external_component}]), - mnesia:add_table_copy(external_component_global, node(), ram_copies), mongoose_metrics:ensure_metric(global, routingErrors, spiral), - gen_hook:add_handlers(hooks()), - + mongoose_component:start(), {ok, #state{}}. handle_call(_Request, _From, State) -> @@ -362,7 +149,7 @@ handle_info(_Info, State) -> {noreply, State}. terminate(_Reason, _State) -> - gen_hook:delete_handlers(hooks()), + mongoose_component:stop(), ok. 
code_change(_OldVsn, State, _Extra) -> @@ -371,9 +158,6 @@ code_change(_OldVsn, State, _Extra) -> %%-------------------------------------------------------------------- %%% Internal functions %%-------------------------------------------------------------------- --spec hooks() -> [gen_hook:hook_tuple()]. -hooks() -> - [{node_cleanup, global, fun ?MODULE:routes_cleanup_on_nodedown/3, #{}, 90}]. routing_modules_list() -> mongoose_config:get_opt(routing_modules). @@ -410,32 +194,3 @@ route(OrigFrom, OrigTo, Acc0, OrigPacket, [M|Tail]) -> class => Class, reason => Reason, stacktrace => Stacktrace}), mongoose_acc:append(router, result, {error, {M, Reason}}, Acc0) end. - -update_tables() -> - case catch mnesia:table_info(external_component, attributes) of - [domain, handler, node] -> - mnesia:delete_table(external_component); - [domain, handler, node, is_hidden] -> - ok; - {'EXIT', _} -> - ok - end, - case catch mnesia:table_info(external_component_global, attributes) of - [domain, handler, node] -> - UpdateFun = fun({external_component, Domain, Handler, Node}) -> - {external_component, Domain, Handler, Node, false} - end, - mnesia:transform_table(external_component_global, UpdateFun, - [domain, handler, node, is_hidden]); - [domain, handler, node, is_hidden] -> - ok; - {'EXIT', _} -> - ok - end. - --spec routes_cleanup_on_nodedown(map(), map(), map()) -> {ok, map()}. -routes_cleanup_on_nodedown(Acc, #{node := Node}, _) -> - Entries = mnesia:dirty_match_object(external_component_global, - #external_component{node = Node, _ = '_'}), - [mnesia:dirty_delete_object(external_component_global, Entry) || Entry <- Entries], - {ok, maps:put(?MODULE, ok, Acc)}. 
diff --git a/src/ejabberd_service.erl b/src/ejabberd_service.erl index ccdf635e08c..acf30f4b5ec 100644 --- a/src/ejabberd_service.erl +++ b/src/ejabberd_service.erl @@ -71,7 +71,8 @@ hidden_components = false :: boolean(), conflict_behaviour :: conflict_behaviour(), access, - check_from + check_from, + components = [] :: mongoose_component:external_component() }). -type state() :: #state{}. @@ -450,9 +451,9 @@ try_register_routes(StateData) -> try_register_routes(StateData, Retries) -> case register_routes(StateData) of - ok -> + {ok, Components} -> send_element(StateData, #xmlel{name = <<"handshake">>}), - {next_state, stream_established, StateData}; + {next_state, stream_established, StateData#state{components = Components}}; {error, Reason} -> RoutesInfo = lookup_routes(StateData), ConflictBehaviour = StateData#state.conflict_behaviour, @@ -474,7 +475,8 @@ routes_info_to_pids(RoutesInfo) -> mongoose_packet_handler:module(H) =:= ?MODULE]. handle_registration_conflict(kick_old, RoutesInfo, StateData, Retries) when Retries > 0 -> - Pids = routes_info_to_pids(RoutesInfo), + %% see lookup_routes + Pids = lists:usort(routes_info_to_pids(RoutesInfo)), Results = lists:map(fun stop_process/1, Pids), AllOk = lists:all(fun(Result) -> Result =:= ok end, Results), case AllOk of @@ -497,18 +499,18 @@ do_disconnect_on_conflict(StateData) -> lookup_routes(StateData) -> Routes = get_routes(StateData), - [{Route, ejabberd_router:lookup_component(Route)} || Route <- Routes]. + %% Lookup for all pids for the route (both local and global) + [{Route, mongoose_component:lookup_component(Route)} || Route <- Routes]. -spec register_routes(state()) -> any(). register_routes(StateData = #state{hidden_components = AreHidden}) -> Routes = get_routes(StateData), Handler = mongoose_packet_handler:new(?MODULE, #{pid => self()}), - ejabberd_router:register_components(Routes, node(), Handler, AreHidden). + mongoose_component:register_components(Routes, node(), Handler, AreHidden). 
-spec unregister_routes(state()) -> any(). -unregister_routes(StateData) -> - Routes = get_routes(StateData), - ejabberd_router:unregister_components(Routes). +unregister_routes(#state{components = Components}) -> + mongoose_component:unregister_components(Components). get_routes(#state{host=Subdomain, is_subdomain=true}) -> Hosts = mongoose_config:get_opt(hosts), diff --git a/src/mod_disco.erl b/src/mod_disco.erl index 01de9ce2919..e5565986205 100644 --- a/src/mod_disco.erl +++ b/src/mod_disco.erl @@ -58,7 +58,7 @@ -include("jlib.hrl"). -include("mongoose_config_spec.hrl"). --type return_hidden() :: ejabberd_router:return_hidden(). +-type return_hidden() :: mongoose_component:return_hidden(). -type server_info() :: #{name := binary(), urls := [binary()], modules => module()}. -spec start(mongooseim:host_type(), gen_mod:module_opts()) -> ok. @@ -291,7 +291,7 @@ get_external_components(Domain, ReturnHidden) -> lists:filter( fun(Component) -> check_if_host_is_the_shortest_suffix_for_route(Component, Domain, StaticDomains) - end, ejabberd_router:dirty_get_all_components(ReturnHidden)). + end, mongoose_component:dirty_get_all_components(ReturnHidden)). -spec check_if_host_is_the_shortest_suffix_for_route( Route :: jid:lserver(), Host :: jid:lserver(), VHosts :: [jid:lserver()]) -> boolean(). 
diff --git a/src/mongoose_router_external.erl b/src/mongoose_router_external.erl index edefe05a0ed..a55a083e705 100644 --- a/src/mongoose_router_external.erl +++ b/src/mongoose_router_external.erl @@ -20,7 +20,7 @@ filter(OrigFrom, OrigTo, OrigAcc, OrigPacket) -> route(From, To, Acc0, Packet) -> LDstDomain = To#jid.lserver, - case ejabberd_router:lookup_component(LDstDomain) of + case mongoose_component:lookup_component(LDstDomain) of [] -> {From, To, Acc0, Packet}; [#external_component{handler = Handler}|_] -> %% may be multiple on various nodes diff --git a/src/mongoose_router_external_localnode.erl b/src/mongoose_router_external_localnode.erl index 96a64dc0dc8..e09441b39cd 100644 --- a/src/mongoose_router_external_localnode.erl +++ b/src/mongoose_router_external_localnode.erl @@ -21,7 +21,7 @@ filter(OrigFrom, OrigTo, OrigAcc, OrigPacket) -> route(From, To, Acc0, Packet) -> LDstDomain = To#jid.lserver, - case ejabberd_router:lookup_component(LDstDomain, node()) of + case mongoose_component:lookup_component(LDstDomain, node()) of [] -> {From, To, Acc0, Packet}; [#external_component{handler = Handler}] -> diff --git a/src/system_metrics/mongoose_system_metrics_collector.erl b/src/system_metrics/mongoose_system_metrics_collector.erl index e658d7aaea0..0432a5d6de0 100644 --- a/src/system_metrics/mongoose_system_metrics_collector.erl +++ b/src/system_metrics/mongoose_system_metrics_collector.erl @@ -129,8 +129,8 @@ get_version() -> end. get_components() -> - Domains = mongoose_router:get_all_domains() ++ ejabberd_router:dirty_get_all_components(all), - Components = [ejabberd_router:lookup_component(D, node()) || D <- Domains], + Domains = mongoose_router:get_all_domains() ++ mongoose_component:dirty_get_all_components(all), + Components = [mongoose_component:lookup_component(D, node()) || D <- Domains], LenComponents = length(lists:flatten(Components)), #{component => LenComponents}. 
diff --git a/test/component_reg_SUITE.erl b/test/component_reg_SUITE.erl index 0f045c14fb6..8c36c11079f 100644 --- a/test/component_reg_SUITE.erl +++ b/test/component_reg_SUITE.erl @@ -40,11 +40,11 @@ opts() -> registering(_C) -> Dom = <<"aaa.bbb.com">>, - ejabberd_router:register_component(Dom, mongoose_packet_handler:new(?MODULE)), - Lookup = ejabberd_router:lookup_component(Dom), + {ok, Comps} = mongoose_component:register_components([Dom], node(), mongoose_packet_handler:new(?MODULE), false), + Lookup = mongoose_component:lookup_component(Dom), ?assertMatch([#external_component{}], Lookup), - ejabberd_router:unregister_component(Dom), - ?assertMatch([], ejabberd_router:lookup_component(Dom)), + mongoose_component:unregister_components(Comps), + ?assertMatch([], mongoose_component:lookup_component(Dom)), ok. registering_with_local(_C) -> @@ -53,37 +53,37 @@ registering_with_local(_C) -> ThisNode = node(), AnotherNode = 'another@nohost', Handler = mongoose_packet_handler:new(?MODULE), %% This handler is only for testing! 
- ejabberd_router:register_component(Dom, Handler), + {ok, Comps} = mongoose_component:register_components([Dom], node(), Handler, false), %% we can find it globally - ?assertMatch([#external_component{node = ThisNode}], ejabberd_router:lookup_component(Dom)), + ?assertMatch([#external_component{node = ThisNode}], mongoose_component:lookup_component(Dom)), %% and for this node ?assertMatch([#external_component{node = ThisNode}], - ejabberd_router:lookup_component(Dom, ThisNode)), + mongoose_component:lookup_component(Dom, ThisNode)), %% but not for another node - ?assertMatch([], ejabberd_router:lookup_component(Dom, AnotherNode)), + ?assertMatch([], mongoose_component:lookup_component(Dom, AnotherNode)), %% once we unregister it is not available - ejabberd_router:unregister_component(Dom), - ?assertMatch([], ejabberd_router:lookup_component(Dom)), - ?assertMatch([], ejabberd_router:lookup_component(Dom, ThisNode)), - ?assertMatch([], ejabberd_router:lookup_component(Dom, AnotherNode)), + mongoose_component:unregister_components(Comps), + ?assertMatch([], mongoose_component:lookup_component(Dom)), + ?assertMatch([], mongoose_component:lookup_component(Dom, ThisNode)), + ?assertMatch([], mongoose_component:lookup_component(Dom, AnotherNode)), %% we can register from both nodes - ejabberd_router:register_component(Dom, ThisNode, Handler), + {ok, Comps2} = mongoose_component:register_components([Dom], ThisNode, Handler, false), %% passing node here is only for testing - ejabberd_router:register_component(Dom, AnotherNode, Handler), + {ok, _Comps3} = mongoose_component:register_components([Dom], AnotherNode, Handler, false), %% both are reachable locally ?assertMatch([#external_component{node = ThisNode}], - ejabberd_router:lookup_component(Dom, ThisNode)), + mongoose_component:lookup_component(Dom, ThisNode)), ?assertMatch([#external_component{node = AnotherNode}], - ejabberd_router:lookup_component(Dom, AnotherNode)), + mongoose_component:lookup_component(Dom, 
AnotherNode)), %% if we try global lookup we get two handlers - ?assertMatch([_, _], ejabberd_router:lookup_component(Dom)), + ?assertMatch([_, _], mongoose_component:lookup_component(Dom)), %% we unregister one and the result is: - ejabberd_router:unregister_component(Dom), - ?assertMatch([], ejabberd_router:lookup_component(Dom, ThisNode)), + mongoose_component:unregister_components(Comps2), + ?assertMatch([], mongoose_component:lookup_component(Dom, ThisNode)), ?assertMatch([#external_component{node = AnotherNode}], - ejabberd_router:lookup_component(Dom)), + mongoose_component:lookup_component(Dom)), ?assertMatch([#external_component{node = AnotherNode}], - ejabberd_router:lookup_component(Dom, AnotherNode)), + mongoose_component:lookup_component(Dom, AnotherNode)), ok. process_packet(_From, _To, _Packet, _Extra) -> diff --git a/test/router_SUITE.erl b/test/router_SUITE.erl index 009c6b59169..37d3295b98c 100644 --- a/test/router_SUITE.erl +++ b/test/router_SUITE.erl @@ -11,8 +11,7 @@ all() -> [ - {group, routing}, - {group, schema} + {group, routing} ]. groups() -> @@ -20,11 +19,7 @@ groups() -> {routing, [], [ basic_routing, do_not_reroute_errors - ]}, - {schema, [], [ - update_tables_hidden_components, - update_tables_hidden_components_idempotent - ]} + ]} ]. init_per_suite(C) -> @@ -45,23 +40,14 @@ init_per_group(routing, Config) -> mongoose_config:set_opt(routing_modules, [xmpp_router_a, xmpp_router_b, xmpp_router_c]), gen_hook:start_link(), ejabberd_router:start_link(), - Config; -init_per_group(schema, Config) -> - remove_component_tables(), Config. end_per_group(routing, _Config) -> - mongoose_config:unset_opt(routing_modules); -end_per_group(schema, _Config) -> - ok. + mongoose_config:unset_opt(routing_modules). init_per_testcase(_CaseName, Config) -> Config. 
-end_per_testcase(HiddenComponent, _Config) - when HiddenComponent == update_tables_hidden_components; - HiddenComponent == update_tables_hidden_components_idempotent -> - remove_component_tables(); end_per_testcase(_CaseName, _Config) -> ok. @@ -108,26 +94,6 @@ do_not_reroute_errors(_) -> ejabberd_router:route(From, To, Acc, Stanza), ok. -update_tables_hidden_components(_C) -> - %% Tables as of b076e4a62a8b21188245f13c42f9cfd93e06e6b7 - create_component_tables([domain, handler, node]), - - ejabberd_router:update_tables(), - - %% Local table is removed and the distributed one has a new list of attributes - false = lists:member(external_component, mnesia:system_info(tables)), - [domain, handler, node, is_hidden] = mnesia:table_info(external_component_global, attributes). - -update_tables_hidden_components_idempotent(_C) -> - AttrsWithHidden = [domain, handler, node, is_hidden], - create_component_tables(AttrsWithHidden), - - ejabberd_router:update_tables(), - - %% Local table is not removed and the attribute list of the distributed one is not changed - true = lists:member(external_component, mnesia:system_info(tables)), - AttrsWithHidden = mnesia:table_info(external_component_global, attributes). - %% --------------------------------------------------------------- %% Helpers %% --------------------------------------------------------------- @@ -199,21 +165,6 @@ verify(L) -> ct:pal("all messages routed correctly") end. -create_component_tables(AttrList) -> - {atomic, ok} = - mnesia:create_table(external_component, - [{attributes, AttrList}, - {local_content, true}]), - {atomic, ok} = - mnesia:create_table(external_component_global, - [{attributes, AttrList}, - {type, bag}, - {record_name, external_component}]). - -remove_component_tables() -> - mnesia:delete_table(external_component), - mnesia:delete_table(external_component_global). 
- resend_as_error(From0, To0, Acc0, Packet0) -> {Acc1, Packet1} = jlib:make_error_reply(Acc0, Packet0, #xmlel{}), Acc2 = ejabberd_router:route(To0, From0, Acc1, Packet1), From c8a16bbbc453fabe30e89613429d022698438114 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 30 Jun 2023 01:25:55 +0200 Subject: [PATCH 060/161] Fix dialyzer --- src/component/mongoose_component.erl | 2 +- src/component/mongoose_component_backend.erl | 18 ++++++++++++++++++ src/ejabberd_s2s_in.erl | 4 ++-- src/ejabberd_service.erl | 2 +- 4 files changed, 22 insertions(+), 4 deletions(-) diff --git a/src/component/mongoose_component.erl b/src/component/mongoose_component.erl index 54bc1e584e6..a75ba742fda 100644 --- a/src/component/mongoose_component.erl +++ b/src/component/mongoose_component.erl @@ -44,7 +44,7 @@ stop() -> hooks() -> [{node_cleanup, global, fun ?MODULE:node_cleanup/3, #{}, 90}]. --spec register_components([Domain :: domain()], +-spec register_components(Domain :: [domain()], Node :: node(), Handler :: mongoose_packet_handler:t(), AreHidden :: boolean()) -> {ok, [external_component()]} | {error, any()}. diff --git a/src/component/mongoose_component_backend.erl b/src/component/mongoose_component_backend.erl index e26ceeeda3f..f482147892c 100644 --- a/src/component/mongoose_component_backend.erl +++ b/src/component/mongoose_component_backend.erl @@ -1,9 +1,21 @@ -module(mongoose_component_backend). +-type external_component() :: mongoose_component:external_component(). + -callback init(map()) -> any(). -callback node_cleanup(node()) -> ok. +-callback register_components(Components :: [external_component()]) -> ok. + +-callback unregister_components(Components :: [external_component()]) -> ok. + +-callback lookup_component(Domain :: jid:lserver()) -> [external_component()]. + +-callback lookup_component(Domain :: jid:lserver(), Node :: node()) -> [external_component()]. + +-callback get_all_components(ReturnHidden :: mongoose_component:return_hidden()) -> [jid:lserver()]. 
+ -export([init/1, node_cleanup/1, register_components/1, @@ -22,26 +34,32 @@ init(Opts) -> mongoose_backend:init(global, ?MAIN_MODULE, [], Opts), mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec node_cleanup(node()) -> ok. node_cleanup(Node) -> Args = [Node], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec register_components(Components :: [external_component()]) -> ok. register_components(Components) -> Args = [Components], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec unregister_components(Components :: [external_component()]) -> ok. unregister_components(Components) -> Args = [Components], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec lookup_component(Domain :: jid:lserver()) -> [external_component()]. lookup_component(Domain) -> Args = [Domain], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec lookup_component(Domain :: jid:lserver(), Node :: node()) -> [external_component()]. lookup_component(Domain, Node) -> Args = [Domain, Node], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec get_all_components(ReturnHidden :: mongoose_component:return_hidden()) -> [jid:lserver()]. get_all_components(ReturnHidden) -> Args = [ReturnHidden], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). 
diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index e090b7e95c2..9617afdcf34 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -305,7 +305,7 @@ stream_established({xmlstreamelement, El}, StateData) -> %% domain is handled by this server: case {ejabberd_s2s:allow_host(LTo, LFrom), mongoose_router:is_registered_route(LTo) - orelse ejabberd_router:is_component_dirty(LTo)} of + orelse mongoose_component:has_component(LTo)} of {true, true} -> ejabberd_s2s_out:terminate_if_waiting_delay(LTo, LFrom), ejabberd_s2s_out:start(LTo, LFrom, @@ -421,7 +421,7 @@ is_s2s_authenticated(_, _, #state{authenticated = false}) -> false; is_s2s_authenticated(LFrom, LTo, #state{auth_domain = LFrom}) -> mongoose_router:is_registered_route(LTo) - orelse ejabberd_router:is_component_dirty(LTo); + orelse mongoose_component:has_component(LTo); is_s2s_authenticated(_, _, _) -> false. diff --git a/src/ejabberd_service.erl b/src/ejabberd_service.erl index acf30f4b5ec..fff2af1c59f 100644 --- a/src/ejabberd_service.erl +++ b/src/ejabberd_service.erl @@ -72,7 +72,7 @@ conflict_behaviour :: conflict_behaviour(), access, check_from, - components = [] :: mongoose_component:external_component() + components = [] :: [mongoose_component:external_component()] }). -type state() :: #state{}. 
From ceb288180f7a1fa22b7e4718f5ebb1dd7db9e46b Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 30 Jun 2023 05:12:02 +0200 Subject: [PATCH 061/161] Fix tests --- test/common/config_parser_helper.erl | 6 ++++++ test/config_parser_SUITE.erl | 6 ++++++ test/mongoose_config_SUITE.erl | 1 + 3 files changed, 13 insertions(+) diff --git a/test/common/config_parser_helper.erl b/test/common/config_parser_helper.erl index f535dfdaf23..d688a9a6fa9 100644 --- a/test/common/config_parser_helper.erl +++ b/test/common/config_parser_helper.erl @@ -27,6 +27,7 @@ options("host_types") -> #{event_cleaning_interval => 1000, event_max_age => 5000})}}, {sm_backend, mnesia}, + {component_backend, mnesia}, {{s2s, <<"another host type">>}, default_s2s()}, {{s2s, <<"localhost">>}, default_s2s()}, {{s2s, <<"some host type">>}, default_s2s()}, @@ -97,6 +98,7 @@ options("miscellaneous") -> {{s2s, <<"anonymous.localhost">>}, default_s2s()}, {{s2s, <<"localhost">>}, default_s2s()}, {sm_backend, mnesia}, + {component_backend, mnesia}, {{auth, <<"anonymous.localhost">>}, custom_auth()}, {{auth, <<"localhost">>}, custom_auth()}, {{modules, <<"anonymous.localhost">>}, #{}}, @@ -124,6 +126,7 @@ options("modules") -> {{s2s, <<"dummy_host">>}, default_s2s()}, {{s2s, <<"localhost">>}, default_s2s()}, {sm_backend, mnesia}, + {component_backend, mnesia}, {{auth, <<"dummy_host">>}, default_auth()}, {{auth, <<"localhost">>}, default_auth()}, {{modules, <<"dummy_host">>}, all_modules()}, @@ -261,6 +264,7 @@ options("mongooseim-pgsql") -> #{initial_report => 300000, periodic_report => 10800000}}}, {sm_backend, mnesia}, + {component_backend, mnesia}, {{auth, <<"anonymous.localhost">>}, (default_auth())#{anonymous => #{allow_multiple_connections => true, protocol => both}, @@ -366,6 +370,7 @@ options("outgoing_pools") -> {{s2s, <<"localhost">>}, default_s2s()}, {{s2s, <<"localhost.bis">>}, default_s2s()}, {sm_backend, mnesia}, + {component_backend, mnesia}, {{auth, <<"anonymous.localhost">>}, 
default_auth()}, {{auth, <<"localhost">>}, default_auth()}, {{auth, <<"localhost.bis">>}, default_auth()}, @@ -392,6 +397,7 @@ options("s2s_only") -> {routing_modules, mongoose_router:default_routing_modules()}, {services, #{}}, {sm_backend, mnesia}, + {component_backend, mnesia}, {{auth, <<"dummy_host">>}, default_auth()}, {{auth, <<"localhost">>}, default_auth()}, {{modules, <<"dummy_host">>}, #{}}, diff --git a/test/config_parser_SUITE.erl b/test/config_parser_SUITE.erl index aa8f0995b5b..4f8dd1b4153 100644 --- a/test/config_parser_SUITE.erl +++ b/test/config_parser_SUITE.erl @@ -75,6 +75,7 @@ groups() -> language, all_metrics_are_global, sm_backend, + component_backend, max_fsm_queue, http_server_name, rdbms_server_type, @@ -388,6 +389,11 @@ sm_backend(_Config) -> ?cfg(sm_backend, redis, #{<<"general">> => #{<<"sm_backend">> => <<"redis">>}}), ?err(#{<<"general">> => #{<<"sm_backend">> => <<"amnesia">>}}). +component_backend(_Config) -> + ?cfg(component_backend, mnesia, #{}), % default + ?cfg(component_backend, mnesia, #{<<"general">> => #{<<"component_backend">> => <<"mnesia">>}}), + ?err(#{<<"general">> => #{<<"component_backend">> => <<"amnesia">>}}). + max_fsm_queue(_Config) -> ?cfg(max_fsm_queue, 100, #{<<"general">> => #{<<"max_fsm_queue">> => 100}}), ?err(#{<<"general">> => #{<<"max_fsm_queue">> => -10}}). 
diff --git a/test/mongoose_config_SUITE.erl b/test/mongoose_config_SUITE.erl index 99e9447cbdb..61e228607de 100644 --- a/test/mongoose_config_SUITE.erl +++ b/test/mongoose_config_SUITE.erl @@ -189,6 +189,7 @@ minimal_config_opts() -> {routing_modules, mongoose_router:default_routing_modules()}, {services, #{}}, {sm_backend, mnesia}, + {component_backend, mnesia}, {{auth, <<"localhost">>}, config_parser_helper:default_auth()}, {{modules, <<"localhost">>}, #{}}, {{replaced_wait_timeout, <<"localhost">>}, 2000}, From 56004878b4dfb56c1ba51128a94ecb8f708d23a2 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 30 Jun 2023 05:47:15 +0200 Subject: [PATCH 062/161] Add mongoose_component_cets --- big_tests/test.config | 1 + rebar.lock | 2 +- rel/files/mongooseim.toml | 3 ++ src/component/mongoose_component_cets.erl | 42 +++++++++++++++++++++++ src/ejabberd_sup.erl | 2 +- 5 files changed, 48 insertions(+), 2 deletions(-) create mode 100644 src/component/mongoose_component_cets.erl diff --git a/big_tests/test.config b/big_tests/test.config index 7464c642094..3cd90be4a07 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -241,6 +241,7 @@ [{dbs, [redis, pgsql]}, {sm_backend, "\"cets\""}, {bosh_backend, "\"cets\""}, + {component_backend, "\"cets\""}, {stream_management_backend, cets}, {auth_method, "rdbms"}, {internal_databases, "[internal_databases.cets] diff --git a/rebar.lock b/rebar.lock index b74bc09a396..ea107e569fd 100644 --- a/rebar.lock +++ b/rebar.lock @@ -8,7 +8,7 @@ {<<"certifi">>,{pkg,<<"certifi">>,<<"2.9.0">>},1}, {<<"cets">>, {git,"https://github.com/esl/cets.git", - {ref,"c4f47edbe1bc7d467986c8e9dca56991c14f6a77"}}, + {ref,"458e2e1df3fb51896fe334385bb0d2c9c53ef87f"}}, 0}, {<<"cowboy">>,{pkg,<<"cowboy">>,<<"2.9.0">>},0}, {<<"cowboy_swagger">>,{pkg,<<"cowboy_swagger">>,<<"2.5.1">>},0}, diff --git a/rel/files/mongooseim.toml b/rel/files/mongooseim.toml index ed4ca9e0b1a..62063c3b451 100644 --- a/rel/files/mongooseim.toml +++ 
b/rel/files/mongooseim.toml @@ -11,6 +11,9 @@ {{#sm_backend}} sm_backend = {{{sm_backend}}} {{/sm_backend}} + {{#component_backend}} + component_backend = {{{component_backend}}} + {{/component_backend}} max_fsm_queue = 1000 {{#http_server_name}} http_server_name = {{{http_server_name}}} diff --git a/src/component/mongoose_component_cets.erl b/src/component/mongoose_component_cets.erl new file mode 100644 index 00000000000..5baa0e6283d --- /dev/null +++ b/src/component/mongoose_component_cets.erl @@ -0,0 +1,42 @@ +-module(mongoose_component_cets). +-behaviour(mongoose_component_backend). + +-export([init/1, + node_cleanup/1, + register_components/1, + unregister_components/1, + lookup_component/1, + lookup_component/2, + get_all_components/1]). + +-include("external_component.hrl"). +-define(TABLE, cets_external_component). + +init(_) -> + cets:start(?TABLE, #{type => bag, keypos => 2}), + cets_discovery:add_table(mongoose_cets_discovery, ?TABLE). + +node_cleanup(Node) -> + ets:match_delete(?TABLE, #external_component{node = Node, _ = '_'}), + ok. + +register_components(Components) -> + cets:insert_many(?TABLE, Components), + ok. + +unregister_components(Components) -> + cets:delete_object_many(?TABLE, Components), + ok. + +lookup_component(Domain) -> + ets:lookup(?TABLE, Domain). + +lookup_component(Domain, Node) -> + ets:match_object(?TABLE, #external_component{domain = Domain, node = Node, _ = '_'}). + +get_all_components(all) -> + MatchAll = {#external_component{ domain = '$1', _ = '_' }, [], ['$1']}, + ets:select(?TABLE, [MatchAll]); +get_all_components(only_public) -> + MatchNonHidden = {#external_component{ domain = '$1', is_hidden = false, _ = '_' }, [], ['$1']}, + ets:select(?TABLE, [MatchNonHidden]). 
diff --git a/src/ejabberd_sup.erl b/src/ejabberd_sup.erl index 0e84c454be3..09f634bf1a1 100644 --- a/src/ejabberd_sup.erl +++ b/src/ejabberd_sup.erl @@ -163,9 +163,9 @@ init([]) -> Hooks, Cleaner, SMBackendSupervisor, - Router, OutgoingPoolsSupervisor ] ++ cets_specs() ++ [ + Router, S2S, Local, ReceiverSupervisor, From baf60267d3724933b974a1b0f295b5b96eaef233 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 30 Jun 2023 06:47:57 +0200 Subject: [PATCH 063/161] Fix component_reg_SUITE --- test/component_reg_SUITE.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/test/component_reg_SUITE.erl b/test/component_reg_SUITE.erl index 8c36c11079f..6fb0d6b4914 100644 --- a/test/component_reg_SUITE.erl +++ b/test/component_reg_SUITE.erl @@ -36,6 +36,7 @@ end_per_suite(_C) -> opts() -> [{all_metrics_are_global, false}, + {component_backend, mnesia}, {routing_modules, [xmpp_router_a, xmpp_router_b, xmpp_router_c]}]. registering(_C) -> From f37241eda0c8691a789bc2504eadf4ce86fa408b Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 30 Jun 2023 07:17:11 +0200 Subject: [PATCH 064/161] Delete old external_componenst schema --- src/component/mongoose_component_mnesia.erl | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/component/mongoose_component_mnesia.erl b/src/component/mongoose_component_mnesia.erl index 3e5d7621c48..7911570248a 100644 --- a/src/component/mongoose_component_mnesia.erl +++ b/src/component/mongoose_component_mnesia.erl @@ -20,12 +20,11 @@ init(_) -> mnesia:add_table_copy(external_component, node(), ram_copies). update_tables() -> - case catch mnesia:table_info(external_component, attributes) of - [domain, handler, node] -> + %% delete old schema + case catch mnesia:table_info(external_componenst, local_content) of + true -> mnesia:delete_table(external_component); - [domain, handler, node, is_hidden] -> - ok; - {'EXIT', _} -> + _ -> ok end. 
From c58a0d2939309bafb0eba1d37c80aca6334288a6 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 30 Jun 2023 07:18:37 +0200 Subject: [PATCH 065/161] Fix muc_light_SUITE --- test/muc_light_SUITE.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/test/muc_light_SUITE.erl b/test/muc_light_SUITE.erl index 7bee59df83b..47a4825e6d3 100644 --- a/test/muc_light_SUITE.erl +++ b/test/muc_light_SUITE.erl @@ -82,6 +82,7 @@ opts() -> [{hosts, [host_type()]}, {host_types, []}, {all_metrics_are_global, false}, + {component_backend, mnesia}, {{modules, host_type()}, #{mod_muc_light => default_mod_config(mod_muc_light)}}]. %% ------------------------------------------------------------------ From 0b58e50299c68488bb8e2e7fd9c4d8587d9e9d53 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 11 Jul 2023 11:35:27 +0200 Subject: [PATCH 066/161] Fixes from review comments --- src/component/mongoose_component.erl | 14 ++++++++------ test/config_parser_SUITE.erl | 2 ++ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/component/mongoose_component.erl b/src/component/mongoose_component.erl index a75ba742fda..64bc3e6c0e3 100644 --- a/src/component/mongoose_component.erl +++ b/src/component/mongoose_component.erl @@ -18,6 +18,7 @@ -type external_component() :: #external_component{domain :: domain(), handler :: mongoose_packet_handler:t(), + node :: node(), is_hidden :: boolean()}. -export_type([external_component/0]). @@ -90,17 +91,18 @@ unregister_components(Components) -> mongoose_component_backend:unregister_components(Components). 
assert_can_register_components(Components) -> - Checks = lists:map(fun is_already_registered/1, Components), - Zip = lists:zip(Components, Checks), - ConfictDomains = - [LDomain || {#external_component{domain = LDomain}, true} <- Zip], - case ConfictDomains of + ConflictComponents = lists:filter(fun is_already_registered/1, Components), + ConflictDomains = records_to_domains(ConflictComponents), + case ConflictDomains of [] -> ok; _ -> - error({routes_already_exist, ConfictDomains}) + error({routes_already_exist, ConflictDomains}) end. +records_to_domains(Components) -> + [LDomain || #external_component{domain = LDomain} <- Components]. + %% Returns true if any component route is registered for the domain. -spec has_component(jid:lserver()) -> boolean(). has_component(Domain) -> diff --git a/test/config_parser_SUITE.erl b/test/config_parser_SUITE.erl index 4f8dd1b4153..9ac19818600 100644 --- a/test/config_parser_SUITE.erl +++ b/test/config_parser_SUITE.erl @@ -386,12 +386,14 @@ all_metrics_are_global(_Config) -> sm_backend(_Config) -> ?cfg(sm_backend, mnesia, #{}), % default ?cfg(sm_backend, mnesia, #{<<"general">> => #{<<"sm_backend">> => <<"mnesia">>}}), + ?cfg(sm_backend, cets, #{<<"general">> => #{<<"sm_backend">> => <<"cets">>}}), ?cfg(sm_backend, redis, #{<<"general">> => #{<<"sm_backend">> => <<"redis">>}}), ?err(#{<<"general">> => #{<<"sm_backend">> => <<"amnesia">>}}). component_backend(_Config) -> ?cfg(component_backend, mnesia, #{}), % default ?cfg(component_backend, mnesia, #{<<"general">> => #{<<"component_backend">> => <<"mnesia">>}}), + ?cfg(component_backend, cets, #{<<"general">> => #{<<"component_backend">> => <<"cets">>}}), ?err(#{<<"general">> => #{<<"component_backend">> => <<"amnesia">>}}). 
max_fsm_queue(_Config) -> From 8cda94d8e1888c835a63b0955bf3fc7e9961dad1 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 11 Jul 2023 12:47:56 +0200 Subject: [PATCH 067/161] Add test for node_cleanup for components --- src/component/mongoose_component.erl | 4 +- src/component/mongoose_component_cets.erl | 2 +- test/mongoose_cleanup_SUITE.erl | 77 +++++++++++++++++++++-- 3 files changed, 76 insertions(+), 7 deletions(-) diff --git a/src/component/mongoose_component.erl b/src/component/mongoose_component.erl index 64bc3e6c0e3..763436d7b40 100644 --- a/src/component/mongoose_component.erl +++ b/src/component/mongoose_component.erl @@ -36,10 +36,10 @@ start() -> Backend = mongoose_config:get_opt(component_backend), mongoose_component_backend:init(#{backend => Backend}), - gen_hook:delete_handlers(hooks()). + gen_hook:add_handlers(hooks()). stop() -> - gen_hook:add_handlers(hooks()). + gen_hook:delete_handlers(hooks()). -spec hooks() -> [gen_hook:hook_tuple()]. hooks() -> diff --git a/src/component/mongoose_component_cets.erl b/src/component/mongoose_component_cets.erl index 5baa0e6283d..06ef5670076 100644 --- a/src/component/mongoose_component_cets.erl +++ b/src/component/mongoose_component_cets.erl @@ -25,7 +25,7 @@ register_components(Components) -> ok. unregister_components(Components) -> - cets:delete_object_many(?TABLE, Components), + cets:delete_objects(?TABLE, Components), ok. lookup_component(Domain) -> diff --git a/test/mongoose_cleanup_SUITE.erl b/test/mongoose_cleanup_SUITE.erl index e9907e049fa..428b95b2178 100644 --- a/test/mongoose_cleanup_SUITE.erl +++ b/test/mongoose_cleanup_SUITE.erl @@ -3,15 +3,18 @@ -include_lib("eunit/include/eunit.hrl"). -include("mongoose.hrl"). --export([all/0, +-export([all/0, groups/0, init_per_suite/1, end_per_suite/1, + init_per_group/2, end_per_group/2, init_per_testcase/2, end_per_testcase/2]). -export([cleaner_runs_hook_on_nodedown/1, notify_self_hook/3]). 
-export([auth_anonymous/1, last/1, stream_management/1, s2s/1, - bosh/1 + bosh/1, + component/1, + component_from_other_node_remains/1 ]). -define(HOST, <<"localhost">>). @@ -28,11 +31,18 @@ all() -> last, stream_management, s2s, - bosh + bosh, + [{group, Group} || {Group, _, _} <- groups()] ]. +groups() -> + [{component_cets, [], component_cases()}, + {component_mnesia, [], component_cases()}]. + +component_cases() -> + [component, component_from_other_node_remains]. + init_per_suite(Config) -> - mim_ct_sup:start_link(ejabberd_sup), {ok, _} = application:ensure_all_started(jid), ok = mnesia:create_schema([node()]), ok = mnesia:start(), @@ -47,16 +57,53 @@ end_per_suite(Config) -> mnesia:delete_schema([node()]), Config. +init_per_group(component_mnesia, Config) -> + mongoose_config:set_opt(component_backend, mnesia), + Config; +init_per_group(component_cets, Config) -> + mongoose_config:set_opt(component_backend, cets), + DiscoOpts = #{name => mongoose_cets_discovery, disco_file => "does_not_exist.txt"}, + {ok, _Pid} = cets_discovery:start(DiscoOpts), + Config. + +end_per_group(component_cets, _Config) -> + exit(whereis(mongoose_cets_discovery), kill); +end_per_group(_Group, _Config) -> + ok. + init_per_testcase(TestCase, Config) -> + mim_ct_sup:start_link(ejabberd_sup), {ok, _HooksServer} = gen_hook:start_link(), setup_meck(meck_mods(TestCase)), + start_component(TestCase), Config. end_per_testcase(TestCase, _Config) -> + stop_component(TestCase), mongoose_modules:stop(), mongoose_config:set_opt({modules, ?HOST}, #{}), unload_meck(meck_mods(TestCase)). +start_component(TestCase) -> + case needs_component(TestCase) of + true -> + mongoose_router:start(), + mongoose_component:start(); + false -> + ok + end. + +stop_component(TestCase) -> + case needs_component(TestCase) of + true -> + mongoose_component:stop(); + false -> + ok + end. + +needs_component(TestCase) -> + lists:member(TestCase, component_cases()). 
+ opts() -> [{hosts, [?HOST]}, {host_types, []}, @@ -65,6 +112,7 @@ opts() -> meck_mods(bosh) -> [exometer, mod_bosh_socket]; meck_mods(s2s) -> [exometer, ejabberd_commands, mongoose_bin]; +meck_mods(component) -> [exometer]; meck_mods(_) -> [exometer, ejabberd_sm, ejabberd_local]. %% ----------------------------------------------------- @@ -153,6 +201,27 @@ bosh(_Config) -> {error, _} = mod_bosh:get_session_socket(SID), ok. +component(_Config) -> + Handler = fun() -> ok end, + Domain = <<"cool.localhost">>, + Node = some_node, + {ok, _} = mongoose_component:register_components([Domain], Node, Handler, false), + true = mongoose_component:has_component(Domain), + #{mongoose_component := ok} = mongoose_hooks:node_cleanup(Node), + [] = mongoose_component:dirty_get_all_components(all), + false = mongoose_component:has_component(Domain), + ok. + +component_from_other_node_remains(_Config) -> + Handler = fun() -> ok end, + Domain = <<"cool.localhost">>, + {ok, Comps} = mongoose_component:register_components([Domain], other_node, Handler, false), + true = mongoose_component:has_component(Domain), + #{mongoose_component := ok} = mongoose_hooks:node_cleanup(some_node), + true = mongoose_component:has_component(Domain), + mongoose_component:unregister_components(Comps), + ok. 
+ %% ----------------------------------------------------- %% Internal %% ----------------------------------------------------- From 84ece5144b259246bc17241c9d94a90b9cac5fdd Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 29 Jun 2023 15:39:54 +0200 Subject: [PATCH 068/161] CETS backend for S2S --- rel/files/mongooseim.toml | 3 + src/config/mongoose_config_spec.erl | 4 + src/ejabberd_s2s.erl | 410 ++++++++++++---------------- src/ejabberd_s2s_backend.erl | 45 +++ src/ejabberd_s2s_cets.erl | 60 ++++ src/ejabberd_s2s_in.erl | 36 ++- src/ejabberd_s2s_mnesia.erl | 96 +++++++ src/ejabberd_s2s_out.erl | 33 ++- 8 files changed, 435 insertions(+), 252 deletions(-) create mode 100644 src/ejabberd_s2s_backend.erl create mode 100644 src/ejabberd_s2s_cets.erl create mode 100644 src/ejabberd_s2s_mnesia.erl diff --git a/rel/files/mongooseim.toml b/rel/files/mongooseim.toml index 62063c3b451..dc1b652b99d 100644 --- a/rel/files/mongooseim.toml +++ b/rel/files/mongooseim.toml @@ -14,6 +14,9 @@ {{#component_backend}} component_backend = {{{component_backend}}} {{/component_backend}} + {{#s2s_backend}} + s2s_backend = {{{s2s_backend}}} + {{/s2s_backend}} max_fsm_queue = 1000 {{#http_server_name}} http_server_name = {{{http_server_name}}} diff --git a/src/config/mongoose_config_spec.erl b/src/config/mongoose_config_spec.erl index 53f884567da..6c9e5ba67ba 100644 --- a/src/config/mongoose_config_spec.erl +++ b/src/config/mongoose_config_spec.erl @@ -174,6 +174,9 @@ general() -> <<"component_backend">> => #option{type = atom, validate = {module, mongoose_component}, wrap = global_config}, + <<"s2s_backend">> => #option{type = atom, + validate = {module, ejabberd_s2s}, + wrap = global_config}, <<"max_fsm_queue">> => #option{type = integer, validate = positive, wrap = global_config}, @@ -213,6 +216,7 @@ general_defaults() -> <<"all_metrics_are_global">> => false, <<"sm_backend">> => mnesia, <<"component_backend">> => mnesia, + <<"s2s_backend">> => mnesia, <<"rdbms_server_type">> => 
generic, <<"mongooseimctl_access_commands">> => #{}, <<"routing_modules">> => mongoose_router:default_routing_modules(), diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index 8ff94cff6e9..d52c181a1b8 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -41,7 +41,6 @@ try_register/1, remove_connection/2, find_connection/2, - dirty_get_connections/0, allow_host/2, domain_utf8_to_ascii/1, timeout/0, @@ -58,7 +57,7 @@ %% ejabberd API -export([get_info_s2s_connections/1]). --ignore_xref([dirty_get_connections/0, get_info_s2s_connections/1, have_connection/1, +-ignore_xref([get_info_s2s_connections/1, have_connection/1, incoming_s2s_number/0, outgoing_s2s_number/0, start_link/0]). -include("mongoose.hrl"). @@ -69,27 +68,20 @@ -define(DEFAULT_MAX_S2S_CONNECTIONS_NUMBER_PER_NODE, 1). -type fromto() :: {'global' | jid:server(), jid:server()}. --record(s2s, { - fromto, - pid - }). --type s2s() :: #s2s{ - fromto :: fromto(), - pid :: pid() - }. --record(s2s_shared, { - host_type :: mongooseim:host_type(), - secret :: binary() - }). -record(state, {}). +-type secret_source() :: config | random. +-type base16_secret() :: binary(). + +-export_type([fromto/0, secret_source/0, base16_secret/0]). + %%==================================================================== %% API %%==================================================================== %%-------------------------------------------------------------------- %% Description: Starts the server %%-------------------------------------------------------------------- --spec start_link() -> 'ignore' | {'error', _} | {'ok', pid()}. +-spec start_link() -> ignore | {error, _} | {ok, pid()}. start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). @@ -99,89 +91,58 @@ filter(From, To, Acc, Packet) -> route(From, To, Acc, Packet) -> do_route(From, To, Acc, Packet). --spec remove_connection(_, pid()) -> 'ok' | {'aborted', _} | {'atomic', _}. +-spec remove_connection(_, pid()) -> ok. 
remove_connection(FromTo, Pid) -> - case catch mnesia:dirty_match_object(s2s, #s2s{fromto = FromTo, - pid = Pid}) of - [#s2s{pid = Pid}] -> - F = fun() -> - mnesia:delete_object(#s2s{fromto = FromTo, - pid = Pid}) - end, - mnesia:transaction(F); - _ -> - ok - end. + try + call_remove_connection(FromTo, Pid) + catch Class:Reason:Stacktrace -> + ?LOG_ERROR(#{what => s2s_remove_connection_failed, + from_to => FromTo, s2s_pid => Pid, + class => Class, reason => Reason, + stacktrace => Stacktrace}) + end, + ok. have_connection(FromTo) -> - case catch mnesia:dirty_read(s2s, FromTo) of - [_] -> - true; - _ -> - false - end. + get_connections_pids(FromTo) =/= []. --spec get_connections_pids(_) -> ['undefined' | pid()]. +-spec get_connections_pids(_) -> [pid()]. get_connections_pids(FromTo) -> - case catch mnesia:dirty_read(s2s, FromTo) of - L when is_list(L) -> - [Connection#s2s.pid || Connection <- L]; - _ -> + case dirty_read_s2s_list_pids(FromTo) of + {ok, L} when is_list(L) -> + L; + {error, _} -> [] end. -spec try_register(fromto()) -> boolean(). try_register(FromTo) -> - MaxS2SConnectionsNumber = max_s2s_connections_number(FromTo), - MaxS2SConnectionsNumberPerNode = - max_s2s_connections_number_per_node(FromTo), - F = fun() -> - L = mnesia:read({s2s, FromTo}), - NeededConnections = needed_connections_number( - L, MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode), - case NeededConnections > 0 of - true -> - mnesia:write(#s2s{fromto = FromTo, - pid = self()}), - true; - false -> - false - end - end, - case mnesia:transaction(F) of - {atomic, Res} -> - Res; - _ -> + ShouldWriteF = should_write_f(FromTo), + Pid = self(), + case call_try_register(Pid, ShouldWriteF, FromTo) of + true -> + true; + false -> + {FromServer, ToServer} = FromTo, + ?LOG_ERROR(#{what => s2s_register_failed, + from_server => FromServer, + to_server => ToServer}), false end. -dirty_get_connections() -> - mnesia:dirty_all_keys(s2s). 
- %%==================================================================== %% Hooks callbacks %%==================================================================== -spec node_cleanup(map(), map(), map()) -> {ok, map()}. node_cleanup(Acc, #{node := Node}, _) -> - F = fun() -> - Es = mnesia:select( - s2s, - [{#s2s{pid = '$1', _ = '_'}, - [{'==', {node, '$1'}, Node}], - ['$_']}]), - lists:foreach(fun(E) -> - mnesia:delete_object(E) - end, Es) - end, - Res = mnesia:async_dirty(F), + Res = call_node_cleanup(Node), {ok, maps:put(?MODULE, Res, Acc)}. -spec key(mongooseim:host_type(), {jid:lserver(), jid:lserver()}, binary()) -> binary(). key(HostType, {From, To}, StreamID) -> - Secret = get_shared_secret(HostType), + {ok, {_, Secret}} = get_shared_secret(HostType), SecretHashed = base16:encode(crypto:hash(sha256, Secret)), HMac = crypto:mac(hmac, sha256, SecretHashed, [From, " ", To, " ", StreamID]), base16:encode(HMac). @@ -190,75 +151,30 @@ key(HostType, {From, To}, StreamID) -> %% gen_server callbacks %%==================================================================== -%%-------------------------------------------------------------------- -%% Function: init(Args) -> {ok, State} | -%% {ok, State, Timeout} | -%% ignore | -%% {stop, Reason} -%% Description: Initiates the server -%%-------------------------------------------------------------------- init([]) -> - mnesia:create_table(s2s, [{ram_copies, [node()]}, {type, bag}, - {attributes, record_info(fields, s2s)}]), - mnesia:add_table_copy(s2s, node(), ram_copies), - mnesia:create_table(s2s_shared, [{ram_copies, [node()]}, - {attributes, record_info(fields, s2s_shared)}]), - mnesia:add_table_copy(s2s_shared, node(), ram_copies), - {atomic, ok} = set_shared_secret(), + db_init(), + set_shared_secret(), ejabberd_commands:register_commands(commands()), gen_hook:add_handlers(hooks()), {ok, #state{}}. 
-%%-------------------------------------------------------------------- -%% Function: %% handle_call(Request, From, State) -> {reply, Reply, State} | -%% {reply, Reply, State, Timeout} | -%% {noreply, State} | -%% {noreply, State, Timeout} | -%% {stop, Reason, Reply, State} | -%% {stop, Reason, State} -%% Description: Handling call messages -%%-------------------------------------------------------------------- handle_call(Request, From, State) -> ?UNEXPECTED_CALL(Request, From), {reply, {error, unexpected_call}, State}. -%%-------------------------------------------------------------------- -%% Function: handle_cast(Msg, State) -> {noreply, State} | -%% {noreply, State, Timeout} | -%% {stop, Reason, State} -%% Description: Handling cast messages -%%-------------------------------------------------------------------- handle_cast(Msg, State) -> ?UNEXPECTED_CAST(Msg), {noreply, State}. -%%-------------------------------------------------------------------- -%% Function: handle_info(Info, State) -> {noreply, State} | -%% {noreply, State, Timeout} | -%% {stop, Reason, State} -%% Description: Handling all non call/cast messages -%%-------------------------------------------------------------------- - handle_info(Msg, State) -> ?UNEXPECTED_INFO(Msg), {noreply, State}. -%%-------------------------------------------------------------------- -%% Function: terminate(Reason, State) -> void() -%% Description: This function is called by a gen_server when it is about to -%% terminate. It should be the opposite of Module:init/1 and do any necessary -%% cleaning up. When it returns, the gen_server terminates with Reason. -%% The return value is ignored. -%%-------------------------------------------------------------------- terminate(_Reason, _State) -> gen_hook:delete_handlers(hooks()), ejabberd_commands:unregister_commands(commands()), ok. 
-%%-------------------------------------------------------------------- -%% Func: code_change(OldVsn, State, Extra) -> {ok, NewState} -%% Description: Convert process state when code is changed -%%-------------------------------------------------------------------- code_change(_OldVsn, State, _Extra) -> {ok, State}. @@ -277,7 +193,7 @@ hooks() -> do_route(From, To, Acc, Packet) -> ?LOG_DEBUG(#{what => s2s_route, acc => Acc}), case find_connection(From, To) of - {atomic, Pid} when is_pid(Pid) -> + {ok, Pid} when is_pid(Pid) -> ?LOG_DEBUG(#{what => s2s_found_connection, text => <<"Send packet to s2s connection">>, s2s_pid => Pid, acc => Acc}), @@ -289,7 +205,7 @@ do_route(From, To, Acc, Packet) -> Acc1 = mongoose_hooks:s2s_send_packet(Acc, From, To, Packet), send_element(Pid, Acc1, NewPacket), {done, Acc1}; - {aborted, _Reason} -> + {error, _Reason} -> case mongoose_acc:stanza_type(Acc) of <<"error">> -> {done, Acc}; @@ -305,8 +221,13 @@ do_route(From, To, Acc, Packet) -> end. -spec find_connection(From :: jid:jid(), - To :: jid:jid()) -> {'aborted', _} | {'atomic', _}. + To :: jid:jid()) -> {ok, pid()} | {error, Reason :: term()}. find_connection(From, To) -> + find_connection(From, To, 3). + +find_connection(_From, _To, 0) -> + {error, retries_failed}; +find_connection(From, To, Retries) -> #jid{lserver = MyServer} = From, #jid{lserver = Server} = To, FromTo = {MyServer, Server}, @@ -314,43 +235,30 @@ find_connection(From, To) -> MaxS2SConnectionsNumberPerNode = max_s2s_connections_number_per_node(FromTo), ?LOG_DEBUG(#{what => s2s_find_connection, from_server => MyServer, to_server => Server}), - case catch mnesia:dirty_read(s2s, FromTo) of - {'EXIT', Reason} -> - {aborted, Reason}; - [] -> + case dirty_read_s2s_list_pids(FromTo) of + {error, Reason} -> + {error, Reason}; + {ok, []} -> + %% TODO too complex, and could cause issues on bursts. + %% What would happen if connection is denied? + %% Start a pool instead maybe? + %% When do we close the connection? 
+ %% We try to establish all the connections if the host is not a %% service and if the s2s host is not blacklisted or %% is in whitelist: maybe_open_several_connections(From, To, MyServer, Server, FromTo, MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode); - L when is_list(L) -> - maybe_open_missing_connections(From, MyServer, Server, FromTo, + MaxS2SConnectionsNumberPerNode, Retries); + {ok, L} when is_list(L) -> + maybe_open_missing_connections(From, To, MyServer, Server, FromTo, MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode, L) - end. - -maybe_open_missing_connections(From, MyServer, Server, FromTo, - MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode, L) -> - NeededConnections = needed_connections_number( - L, MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode), - case NeededConnections > 0 of - true -> - %% We establish the missing connections for this pair. - open_several_connections( - NeededConnections, MyServer, - Server, From, FromTo, - MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode); - false -> - %% We choose a connexion from the pool of opened ones. - {atomic, choose_connection(From, L)} + MaxS2SConnectionsNumberPerNode, L, Retries) end. maybe_open_several_connections(From, To, MyServer, Server, FromTo, MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode) -> + MaxS2SConnectionsNumberPerNode, Retries) -> %% We try to establish all the connections if the host is not a %% service and if the s2s host is not blacklisted or %% is in whitelist: @@ -360,18 +268,31 @@ maybe_open_several_connections(From, To, MyServer, Server, FromTo, [], MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode), open_several_connections( - NeededConnections, MyServer, - Server, From, FromTo, - MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode); + NeededConnections, MyServer, Server, FromTo), + find_connection(From, To, Retries - 1); false -> - {aborted, error} + {error, not_allowed} end. 
--spec choose_connection(From :: jid:jid(), - Connections :: [s2s()]) -> any(). -choose_connection(From, Connections) -> - choose_pid(From, [C#s2s.pid || C <- Connections]). +maybe_open_missing_connections(From, To, MyServer, Server, FromTo, + MaxS2SConnectionsNumber, + MaxS2SConnectionsNumberPerNode, L, Retries) -> + NeededConnections = needed_connections_number( + L, MaxS2SConnectionsNumber, + MaxS2SConnectionsNumberPerNode), + case NeededConnections > 0 of + true -> + %% We establish the missing connections for this pair. + open_several_connections( + NeededConnections, MyServer, + Server, FromTo), + find_connection(From, To, Retries - 1); + false -> + %% We choose a connexion from the pool of opened ones. + {ok, choose_pid(From, L)} + end. +%% Prefers the local connection (i.e. not on the remote node) -spec choose_pid(From :: jid:jid(), Pids :: [pid()]) -> pid(). choose_pid(From, Pids) -> Pids1 = case [P || P <- Pids, node(P) == node()] of @@ -385,57 +306,33 @@ choose_pid(From, Pids) -> Pid. -spec open_several_connections(N :: pos_integer(), MyServer :: jid:server(), - Server :: jid:server(), From :: jid:jid(), FromTo :: fromto(), - MaxS2S :: pos_integer(), MaxS2SPerNode :: pos_integer()) - -> {'aborted', _} | {'atomic', _}. -open_several_connections(N, MyServer, Server, From, FromTo, - MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode) -> - ConnectionsResult = - [new_connection(MyServer, Server, From, FromTo, - MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode) - || _N <- lists:seq(1, N)], - case [PID || {atomic, PID} <- ConnectionsResult] of - [] -> - hd(ConnectionsResult); - PIDs -> - {atomic, choose_pid(From, PIDs)} - end. + Server :: jid:server(), FromTo :: fromto()) -> ok. +open_several_connections(N, MyServer, Server, FromTo) -> + ShouldWriteF = should_write_f(FromTo), + [new_connection(MyServer, Server, FromTo, ShouldWriteF) + || _N <- lists:seq(1, N)], + ok. 
-spec new_connection(MyServer :: jid:server(), Server :: jid:server(), - From :: jid:jid(), FromTo :: fromto(), MaxS2S :: pos_integer(), - MaxS2SPerNode :: pos_integer()) -> {'aborted', _} | {'atomic', _}. -new_connection(MyServer, Server, From, FromTo = {FromServer, ToServer}, - MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode) -> - {ok, Pid} = ejabberd_s2s_out:start( - MyServer, Server, new), - F = fun() -> - L = mnesia:read({s2s, FromTo}), - NeededConnections = needed_connections_number( - L, MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode), - case NeededConnections > 0 of - true -> - mnesia:write(#s2s{fromto = FromTo, - pid = Pid}), - ?LOG_INFO(#{what => s2s_new_connection, - text => <<"New s2s connection started">>, - from_server => FromServer, - to_server => ToServer, - s2s_pid => Pid}), - Pid; - false -> - choose_connection(From, L) - end - end, - TRes = mnesia:transaction(F), - case TRes of - {atomic, Pid} -> + FromTo :: fromto(), ShouldWriteF :: fun()) -> ok. +new_connection(MyServer, Server, FromTo, ShouldWriteF) -> + {ok, Pid} = ejabberd_s2s_out:start(MyServer, Server, new), + case call_try_register(Pid, ShouldWriteF, FromTo) of + true -> + log_new_connection_result(Pid, FromTo), ejabberd_s2s_out:start_connection(Pid); - _ -> + false -> ejabberd_s2s_out:stop_connection(Pid) end, - TRes. + ok. + +log_new_connection_result(Pid, FromTo) -> + {FromServer, ToServer} = FromTo, + ?LOG_INFO(#{what => s2s_new_connection, + text => <<"New s2s connection started">>, + from_server => FromServer, + to_server => ToServer, + s2s_pid => Pid}). -spec max_s2s_connections_number(fromto()) -> pos_integer(). max_s2s_connections_number({From, To}) -> @@ -453,13 +350,24 @@ max_s2s_connections_number_per_node({From, To}) -> _ -> ?DEFAULT_MAX_S2S_CONNECTIONS_NUMBER_PER_NODE end. --spec needed_connections_number([any()], pos_integer(), pos_integer()) -> integer(). +-spec needed_connections_number([pid()], pos_integer(), pos_integer()) -> integer(). 
needed_connections_number(Ls, MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode) -> - LocalLs = [L || L <- Ls, node(L#s2s.pid) == node()], + LocalLs = [L || L <- Ls, node(L) == node()], lists:min([MaxS2SConnectionsNumber - length(Ls), MaxS2SConnectionsNumberPerNode - length(LocalLs)]). +should_write_f(FromTo) -> + MaxS2SConnectionsNumber = max_s2s_connections_number(FromTo), + MaxS2SConnectionsNumberPerNode = + max_s2s_connections_number_per_node(FromTo), + fun(L) -> + NeededConnections = needed_connections_number( + L, MaxS2SConnectionsNumber, + MaxS2SConnectionsNumberPerNode), + NeededConnections > 0 + end. + %%-------------------------------------------------------------------- %% Function: is_service(From, To) -> true | false %% Description: Return true if the destination must be considered as a @@ -571,12 +479,12 @@ get_s2s_info(Connections, Type)-> complete_s2s_info([], _, Result)-> Result; complete_s2s_info([Connection|T], Type, Result)-> - {_, PID, _, _}=Connection, + {_, PID, _, _} = Connection, State = get_s2s_state(PID), complete_s2s_info(T, Type, [State|Result]). -spec get_s2s_state(connstate()) -> [{atom(), any()}, ...]. -get_s2s_state(S2sPid)-> +get_s2s_state(S2sPid) -> Infos = case gen_fsm_compat:sync_send_all_state_event(S2sPid, get_state_infos) of {state_infos, Is} -> [{status, open} | Is]; {noproc, _} -> [{status, closed}]; %% Connection closed @@ -584,27 +492,33 @@ get_s2s_state(S2sPid)-> end, [{s2s_pid, S2sPid} | Infos]. --spec get_shared_secret(mongooseim:host_type()) -> binary(). -get_shared_secret(HostType) -> - [#s2s_shared{secret = Secret}] = ets:lookup(s2s_shared, HostType), - Secret. - --spec set_shared_secret() -> {atomic, ok} | {aborted, term()}. +-spec set_shared_secret() -> ok. set_shared_secret() -> - mnesia:transaction(fun() -> - [set_shared_secret_t(HostType) || HostType <- ?ALL_HOST_TYPES], - ok - end). - --spec set_shared_secret_t(mongooseim:host_type()) -> ok. 
-set_shared_secret_t(HostType) -> - Secret = case mongoose_config:lookup_opt([{s2s, HostType}, shared]) of - {ok, SecretFromConfig} -> - SecretFromConfig; - {error, not_found} -> - base16:encode(crypto:strong_rand_bytes(10)) - end, - mnesia:write(#s2s_shared{host_type = HostType, secret = Secret}). + [set_shared_secret(HostType) || HostType <- ?ALL_HOST_TYPES], + ok. + +set_shared_secret(HostType) -> + {Source, Secret} = get_shared_secret_from_config_or_make_new(HostType), + case get_shared_secret(HostType) of + {error, not_found} -> + %% Write secret for the first time + register_secret(HostType, Source, Secret); + {ok, {_, OldSecret}} when OldSecret =:= Secret -> + skip_same; + {ok, _} when Source =:= config -> + ?LOG_INFO(#{what => overwrite_secret_from_config}), + register_secret(HostType, Source, Secret); + {ok, _} -> + ok + end. + +get_shared_secret_from_config_or_make_new(HostType) -> + case mongoose_config:lookup_opt([{s2s, HostType}, shared]) of + {ok, SecretFromConfig} -> + {config, SecretFromConfig}; + {error, not_found} -> + {random, base16:encode(crypto:strong_rand_bytes(10))} + end. -spec lookup_certfile(mongooseim:host_type()) -> {ok, string()} | {error, not_found}. lookup_certfile(HostType) -> @@ -614,3 +528,41 @@ lookup_certfile(HostType) -> {error, not_found} -> mongoose_config:lookup_opt([{s2s, HostType}, certfile]) end. + + +%% Backend logic below: + +db_init() -> + Backend = mongoose_config:get_opt(s2s_backend), + ejabberd_s2s_backend:init(#{backend => Backend}). + +-spec dirty_read_s2s_list_pids(FromTo :: fromto()) -> {ok, [pid()]} | {error, Reason :: term()}. +dirty_read_s2s_list_pids(FromTo) -> + try + ejabberd_s2s_backend:dirty_read_s2s_list_pids(FromTo) + catch Class:Reason:Stacktrace -> + ?LOG_ERROR(#{what => s2s_dirty_read_s2s_list_failed, + from_to => FromTo, + class => Class, reason => Reason, + stacktrace => Stacktrace}), + {error, Reason} + end. 
+ +%% Returns true if the connection is registered +-spec call_try_register(Pid :: pid(), ShouldWriteF :: fun(), FromTo :: fromto()) -> boolean(). +call_try_register(Pid, ShouldWriteF, FromTo) -> + ejabberd_s2s_backend:try_register(Pid, ShouldWriteF, FromTo). + +call_node_cleanup(Node) -> + ejabberd_s2s_backend:node_cleanup(Node). + +call_remove_connection(FromTo, Pid) -> + ejabberd_s2s_backend:remove_connection(FromTo, Pid). + +-spec get_shared_secret(mongooseim:host_type()) -> {ok, {secret_source(), base16_secret()}} | {error, not_found}. +get_shared_secret(HostType) -> + ejabberd_s2s_backend:get_shared_secret(HostType). + +-spec register_secret(mongooseim:host_type(), ejabberd_s2s:secret_source(), ejabberd_s2s:base16_secret()) -> ok. +register_secret(HostType, Source, Secret) -> + ejabberd_s2s_backend:register_secret(HostType, Source, Secret). diff --git a/src/ejabberd_s2s_backend.erl b/src/ejabberd_s2s_backend.erl new file mode 100644 index 00000000000..54489f85bfc --- /dev/null +++ b/src/ejabberd_s2s_backend.erl @@ -0,0 +1,45 @@ +-module(ejabberd_s2s_backend). + +-callback init(map()) -> + any(). +-callback dirty_read_s2s_list_pids(ejabberd_s2s:fromto()) -> + {ok, [pid()]} | {error, Reason :: term()}. + +-export([init/1, + dirty_read_s2s_list_pids/1, + try_register/3, + remove_connection/2, + node_cleanup/1]). + +-export([register_secret/3, + get_shared_secret/1]). + +-ignore_xref([behaviour_info/1]). + +-define(MAIN_MODULE, ejabberd_s2s). + +-spec init(map()) -> any(). +init(Opts) -> + Args = [Opts], + mongoose_backend:init(global, ?MAIN_MODULE, [], Opts), + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +-spec dirty_read_s2s_list_pids(ejabberd_s2s:fromto()) -> + {ok, [pid()]} | {error, Reason :: term()}. +dirty_read_s2s_list_pids(FromTo) -> + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [FromTo]). 
+ +try_register(Pid, ShouldWriteF, FromTo) -> + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [Pid, ShouldWriteF, FromTo]). + +remove_connection(FromTo, Pid) -> + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [FromTo, Pid]). + +node_cleanup(Node) -> + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [Node]). + +register_secret(HostType, Source, Secret) -> + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [HostType, Source, Secret]). + +get_shared_secret(HostType) -> + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [HostType]). diff --git a/src/ejabberd_s2s_cets.erl b/src/ejabberd_s2s_cets.erl new file mode 100644 index 00000000000..48563a5a378 --- /dev/null +++ b/src/ejabberd_s2s_cets.erl @@ -0,0 +1,60 @@ +-module(ejabberd_s2s_cets). +-export([init/1, + dirty_read_s2s_list_pids/1, + try_register/3, + remove_connection/2, + node_cleanup/1]). + +-export([register_secret/3, + get_shared_secret/1]). + +-include("mongoose_logger.hrl"). + +-define(TABLE, cets_s2s_session). +-define(SECRET_TABLE, cets_s2s_secret). + +init(_) -> + cets:start(?TABLE, #{}), + cets:start(?SECRET_TABLE, #{}), + cets_discovery:add_table(mongoose_cets_discovery, ?TABLE), + cets_discovery:add_table(mongoose_cets_discovery, ?SECRET_TABLE). + +%% Pid lists +dirty_read_s2s_list_pids(FromTo) -> + R = {{FromTo, '$1'}}, + Pids = ets:select(?TABLE, [{R, [], ['$1']}]), + {ok, Pids}. + +try_register(Pid, ShouldWriteF, FromTo) -> + L = dirty_read_s2s_list_pids(FromTo), + case ShouldWriteF(L) of + true -> + cets:insert(?TABLE, {{FromTo, Pid}}), + true; + false -> + false + end. + +remove_connection(FromTo, Pid) -> + cets:delete(?TABLE, {FromTo, Pid}), + ok. + +%% node_cleanup is called on each node in the cluster, when Node is down +node_cleanup(Node) -> + KeyPattern = {'_', '$1'}, + R = {KeyPattern}, + Guard = {'==', {node, '$1'}, Node}, + ets:select_delete(?TABLE, [{R, [Guard], [true]}]). 
+ +%% Secrets +register_secret(HostType, Source, Secret) -> + cets:insert(?SECRET_TABLE, {HostType, Source, Secret}), + ok. + +get_shared_secret(HostType) -> + case ets:lookup(?SECRET_TABLE, HostType) of + [{_HostType, Source, Secret}] -> + {ok, {Source, Secret}}; + [] -> + {error, not_found} + end. diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index 9617afdcf34..d611a363236 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -127,7 +127,7 @@ start_listener(Opts) -> %%---------------------------------------------------------------------- -spec init([socket() | options(), ...]) -> {ok, wait_for_stream, state()}. init([Socket, #{shaper := Shaper, tls := TLSOpts}]) -> - ?LOG_DEBUG(#{what => s2n_in_started, + ?LOG_DEBUG(#{what => s2s_in_started, text => <<"New incoming S2S connection">>, socket => Socket}), Timer = erlang:start_timer(ejabberd_s2s:timeout(), self(), []), @@ -177,10 +177,13 @@ wait_for_stream({xmlstreamstart, _Name, Attrs}, StateData) -> stream_start_error(StateData, mongoose_xmpp_errors:invalid_namespace()) end; wait_for_stream({xmlstreamerror, _}, StateData) -> + ?LOG_WARNING(#{what => s2s_in_wait_for_stream_error}), stream_start_error(StateData, mongoose_xmpp_errors:xml_not_well_formed()); wait_for_stream(timeout, StateData) -> + ?LOG_WARNING(#{what => s2s_in_wait_for_stream_timeout}), {stop, normal, StateData}; wait_for_stream(closed, StateData) -> + ?LOG_WARNING(#{what => s2s_in_wait_for_stream_closed}), {stop, normal, StateData}. 
start_stream(#{<<"version">> := <<"1.0">>, <<"from">> := RemoteServer}, @@ -196,17 +199,17 @@ start_stream(#{<<"version">> := <<"1.0">>, <<"from">> := RemoteServer}, StartTLS = get_tls_xmlel(StateData), case SASL of {error_cert_verif, CertError} -> - ?LOG_INFO(#{what => s2s_connection_closing, - text => <<"Closing s2s connection">>, - server => StateData#state.server, - remote_server => RemoteServer, - reason => cert_error, - cert_error => CertError}), + ?LOG_WARNING(#{what => s2s_connection_closing, + text => <<"Closing s2s connection">>, + server => StateData#state.server, + remote_server => RemoteServer, + reason => cert_error, + cert_error => CertError}), Res = stream_start_error(StateData, mongoose_xmpp_errors:policy_violation(?MYLANG, CertError)), %% FIXME: why do we want stop just one of the connections here? - {atomic, Pid} = ejabberd_s2s:find_connection(jid:make(<<>>, Server, <<>>), - jid:make(<<>>, RemoteServer, <<>>)), + {ok, Pid} = ejabberd_s2s:find_connection(jid:make(<<>>, Server, <<>>), + jid:make(<<>>, RemoteServer, <<>>)), ejabberd_s2s_out:stop_connection(Pid), Res; _ -> @@ -232,6 +235,7 @@ stream_start_error(StateData, Error) -> send_text(StateData, ?STREAM_HEADER(<<>>)), send_element(StateData, Error), send_text(StateData, ?STREAM_TRAILER), + ?LOG_WARNING(#{what => s2s_in_stream_start_error}), {stop, normal, StateData}. 
-spec wait_for_feature_request(ejabberd:xml_stream_item(), state() @@ -270,6 +274,7 @@ wait_for_feature_request({xmlstreamelement, El}, StateData) -> #xmlel{name = <<"failure">>, attrs = [{<<"xmlns">>, ?NS_SASL}], children = [#xmlel{name = <<"invalid-mechanism">>}]}), + ?LOG_WARNING(#{what => s2s_in_invalid_mechanism}), {stop, normal, StateData} end; _ -> @@ -277,12 +282,15 @@ wait_for_feature_request({xmlstreamelement, El}, StateData) -> end; wait_for_feature_request({xmlstreamend, _Name}, StateData) -> send_text(StateData, ?STREAM_TRAILER), + ?LOG_WARNING(#{what => s2s_in_got_stream_end_before_feature_request}), {stop, normal, StateData}; wait_for_feature_request({xmlstreamerror, _}, StateData) -> send_element(StateData, mongoose_xmpp_errors:xml_not_well_formed()), send_text(StateData, ?STREAM_TRAILER), + ?LOG_WARNING(#{what => s2s_in_got_stream_error_before_feature_request}), {stop, normal, StateData}; wait_for_feature_request(closed, StateData) -> + ?LOG_WARNING(#{what => s2s_in_got_closed_before_feature_request}), {stop, normal, StateData}. 
tls_options_with_certfile(#state{host_type = HostType, tls_options = TLSOptions}) -> @@ -320,9 +328,11 @@ stream_established({xmlstreamelement, El}, StateData) -> timer = Timer}}; {_, false} -> send_element(StateData, mongoose_xmpp_errors:host_unknown()), + ?LOG_WARNING(#{what => s2s_in_key_from_uknown_host}), {stop, normal, StateData}; {false, _} -> send_element(StateData, mongoose_xmpp_errors:invalid_from()), + ?LOG_WARNING(#{what => s2s_in_key_with_invalid_from}), {stop, normal, StateData} end; {verify, To, From, Id, Key} -> @@ -334,6 +344,8 @@ stream_established({xmlstreamelement, El}, StateData) -> Key -> <<"valid">>; _ -> <<"invalid">> end, + %% XEP-0185: Dialback Key Generation and Validation + %% DB means dial-back send_element(StateData, #xmlel{name = <<"db:verify">>, attrs = [{<<"from">>, To}, @@ -385,6 +397,7 @@ stream_established({xmlstreamend, _Name}, StateData) -> stream_established({xmlstreamerror, _}, StateData) -> send_element(StateData, mongoose_xmpp_errors:xml_not_well_formed()), send_text(StateData, ?STREAM_TRAILER), + ?LOG_WARNING(#{what => s2s_in_stream_error, state_name => stream_established}), {stop, normal, StateData}; stream_established(timeout, StateData) -> {stop, normal, StateData}; @@ -542,8 +555,8 @@ handle_info(_, StateName, StateData) -> %% Returns: any %%---------------------------------------------------------------------- -spec terminate(any(), statename(), state()) -> 'ok'. -terminate(Reason, _StateName, StateData) -> - ?LOG_DEBUG(#{what => s2s_in_stopped, reason => Reason}), +terminate(Reason, StateName, StateData) -> + ?LOG_DEBUG(#{what => s2s_in_stopped, reason => Reason, state_name => StateName}), mongoose_transport:close(StateData#state.socket), ok. @@ -688,6 +701,7 @@ handle_auth_res(_, _, StateData) -> #xmlel{name = <<"failure">>, attrs = [{<<"xmlns">>, ?NS_SASL}]}), send_text(StateData, ?STREAM_TRAILER), + ?LOG_WARNING(#{what => s2s_in_auth_failed}), {stop, normal, StateData}. 
diff --git a/src/ejabberd_s2s_mnesia.erl b/src/ejabberd_s2s_mnesia.erl new file mode 100644 index 00000000000..4d22e42c32b --- /dev/null +++ b/src/ejabberd_s2s_mnesia.erl @@ -0,0 +1,96 @@ +-module(ejabberd_s2s_mnesia). +-export([init/1, + dirty_read_s2s_list_pids/1, + try_register/3, + remove_connection/2, + node_cleanup/1]). + +-export([register_secret/3, + get_shared_secret/1]). + +-record(s2s, { + fromto :: ejabberd_s2s:fromto() | '_', + pid :: pid() | '$1' + }). + +-record(s2s_secret, {host_type, source, secret}). + +-include("mongoose_logger.hrl"). + +init(_) -> + init_pids(), + init_secrets(). + +%% Pid lists +init_pids() -> + Opts = [{ram_copies, [node()]}, {type, bag}, + {attributes, record_info(fields, s2s)}], + mnesia:create_table(s2s, Opts), + mnesia:add_table_copy(s2s, node(), ram_copies). + +dirty_read_s2s_list_pids(FromTo) -> + {ok, s2s_to_pids(mnesia:dirty_read(s2s, FromTo))}. + +try_register(Pid, ShouldWriteF, FromTo) -> + F = fun() -> + L = s2s_to_pids(mnesia:read({s2s, FromTo})), + case ShouldWriteF(L) of + true -> + mnesia:write(#s2s{fromto = FromTo, pid = Pid}), + true; + false -> + false + end + end, + case mnesia:transaction(F) of + {atomic, Bool} -> + Bool; + Other -> + ?LOG_ERROR(#{what => s2s_try_register_failed, + s2s_pid => Pid, from_to => FromTo, + reason => Other}), + false + end. + +remove_connection(FromTo, Pid) -> + Rec = #s2s{fromto = FromTo, pid = Pid}, + F = fun() -> + mnesia:delete_object(Rec) + end, + {atomic, _} = mnesia:transaction(F), + ok. + +node_cleanup(Node) -> + F = fun() -> + Es = mnesia:select( + s2s, + [{#s2s{pid = '$1', _ = '_'}, + [{'==', {node, '$1'}, Node}], + ['$_']}]), + lists:foreach(fun(E) -> + mnesia:delete_object(E) + end, Es) + end, + mnesia:async_dirty(F). + +s2s_to_pids(List) -> + [Pid || #s2s{pid = Pid} <- List]. 
+ +%% Secrets +init_secrets() -> + Opts = [{ram_copies, [node()]}, {attributes, record_info(fields, s2s_secret)}], + mnesia:create_table(s2s_secret, Opts), + mnesia:add_table_copy(s2s_secret, node(), ram_copies). + +register_secret(HostType, Source, Secret) -> + Rec = #s2s_secret{host_type = HostType, source = Source, secret = Secret}, + {atomic, _} = mnesia:transaction(fun() -> mnesia:write(Rec) end), + ok. + +get_shared_secret(HostType) -> + case mnesia:dirty_read(s2s_secret, HostType) of + [#s2s_secret{source = Source, secret = Secret}] -> + {ok, {Source, Secret}}; + [] -> + {error, not_found} + end. diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index 9b8a538f457..94e053c7c07 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -336,6 +336,8 @@ wait_for_validation({xmlstreamelement, El}, StateData) -> from => From, to => To, message_id => Id, type => Type}), case {Type, StateData#state.tls_enabled, StateData#state.tls_required} of {<<"valid">>, Enabled, Required} when (Enabled==true) or (Required==false) -> + %% Initiating Server Receives Valid Verification Result from Receiving Server (Step 4) + %% https://xmpp.org/extensions/xep-0220.html#example-2 send_queue(StateData, StateData#state.queue), ?LOG_INFO(#{what => s2s_out_connected, text => <<"New outgoing s2s connection established">>, @@ -453,10 +455,10 @@ wait_for_auth_result({xmlstreamelement, El}, StateData) -> #xmlel{name = <<"failure">>, attrs = Attrs} -> case xml:get_attr_s(<<"xmlns">>, Attrs) of ?NS_SASL -> - ?LOG_INFO(#{what => s2s_auth_failure, - text => <<"Received failure result in ejabberd_s2s_out. Restarting">>, - myname => StateData#state.myname, - server => StateData#state.server}), + ?LOG_WARNING(#{what => s2s_auth_failure, + text => <<"Received failure result in ejabberd_s2s_out. 
Restarting">>, + myname => StateData#state.myname, + server => StateData#state.server}), mongoose_transport:close(StateData#state.socket), {next_state, reopen_socket, StateData#state{socket = undefined}, ?FSMTIMEOUT}; @@ -703,6 +705,15 @@ terminate(Reason, StateName, StateData) -> E = mongoose_xmpp_errors:remote_server_not_found(<<"en">>, <<"Bounced by s2s">>), %% bounce queue manage by process and Erlang message queue bounce_queue(StateData#state.queue, E), + case queue:is_empty(StateData#state.queue) of + true -> + ok; + false -> + ?LOG_WARNING(#{what => s2s_terminate_non_empty, + state_name => StateName, + queue => lists:sublist(queue:to_list(StateData#state.queue), 10), + authenticated => StateData#state.authenticated}) + end, bounce_messages(E), case StateData#state.socket of undefined -> @@ -822,6 +833,8 @@ send_db_request(StateData) -> StateData#state.host_type, {StateData#state.myname, Server}, StateData#state.remote_streamid), + %% Initiating Server Sends Dialback Key + %% https://xmpp.org/extensions/xep-0220.html#example-1 send_element(StateData, #xmlel{name = <<"db:result">>, attrs = [{<<"from">>, StateData#state.myname}, @@ -1093,15 +1106,11 @@ addr_type(Addr) when tuple_size(Addr) =:= 4 -> inet; addr_type(Addr) when tuple_size(Addr) =:= 8 -> inet6. send_event(<<"valid">>, Pid, StateData) -> - p1_fsm:send_event( - Pid, {valid, - StateData#state.server, - StateData#state.myname}); + Event = {valid, StateData#state.server, StateData#state.myname}, + p1_fsm:send_event(Pid, Event); send_event(_, Pid, StateData) -> - p1_fsm:send_event( - Pid, {invalid, - StateData#state.server, - StateData#state.myname}). + Event = {invalid, StateData#state.server, StateData#state.myname}, + p1_fsm:send_event(Pid, Event). 
get_acc_with_new_sext(?NS_SASL, Els1, {_SEXT, STLS, STLSReq}) -> NewSEXT = From b4cffd785ab863b4dd3e73698ec7c2f0c2cb8700 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 29 Jun 2023 15:49:28 +0200 Subject: [PATCH 069/161] Rename ejabberd_s2s_backend to mongoose_s2s_backend We do not want to move ejabberd_s2s module though in the feature branch. It would make merging to upstream too difficult. --- big_tests/test.config | 1 + src/config/mongoose_config_spec.erl | 2 +- src/ejabberd_s2s.erl | 18 +++++++++--------- .../mongoose_s2s_backend.erl} | 4 ++-- .../mongoose_s2s_cets.erl} | 4 ++-- .../mongoose_s2s_mnesia.erl} | 2 +- 6 files changed, 16 insertions(+), 15 deletions(-) rename src/{ejabberd_s2s_backend.erl => s2s/mongoose_s2s_backend.erl} (95%) rename src/{ejabberd_s2s_cets.erl => s2s/mongoose_s2s_cets.erl} (95%) rename src/{ejabberd_s2s_mnesia.erl => s2s/mongoose_s2s_mnesia.erl} (98%) diff --git a/big_tests/test.config b/big_tests/test.config index c1f15dbffee..2e8f86de60d 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -242,6 +242,7 @@ {sm_backend, "\"cets\""}, {bosh_backend, "\"cets\""}, {component_backend, "\"cets\""}, + {s2s_backend, "\"cets\""}, {stream_management_backend, cets}, {auth_method, "rdbms"}, {internal_databases, "[internal_databases.cets] diff --git a/src/config/mongoose_config_spec.erl b/src/config/mongoose_config_spec.erl index 6c9e5ba67ba..e1db98e595e 100644 --- a/src/config/mongoose_config_spec.erl +++ b/src/config/mongoose_config_spec.erl @@ -175,7 +175,7 @@ general() -> validate = {module, mongoose_component}, wrap = global_config}, <<"s2s_backend">> => #option{type = atom, - validate = {module, ejabberd_s2s}, + validate = {module, mongoose_s2s}, wrap = global_config}, <<"max_fsm_queue">> => #option{type = integer, validate = positive, diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index d52c181a1b8..10e89665f05 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -352,7 +352,7 @@ 
max_s2s_connections_number_per_node({From, To}) -> -spec needed_connections_number([pid()], pos_integer(), pos_integer()) -> integer(). needed_connections_number(Ls, MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode) -> + MaxS2SConnectionsNumberPerNode) when is_list(Ls) -> LocalLs = [L || L <- Ls, node(L) == node()], lists:min([MaxS2SConnectionsNumber - length(Ls), MaxS2SConnectionsNumberPerNode - length(LocalLs)]). @@ -361,7 +361,7 @@ should_write_f(FromTo) -> MaxS2SConnectionsNumber = max_s2s_connections_number(FromTo), MaxS2SConnectionsNumberPerNode = max_s2s_connections_number_per_node(FromTo), - fun(L) -> + fun(L) when is_list(L) -> NeededConnections = needed_connections_number( L, MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode), @@ -534,12 +534,12 @@ lookup_certfile(HostType) -> db_init() -> Backend = mongoose_config:get_opt(s2s_backend), - ejabberd_s2s_backend:init(#{backend => Backend}). + mongoose_s2s_backend:init(#{backend => Backend}). -spec dirty_read_s2s_list_pids(FromTo :: fromto()) -> {ok, [pid()]} | {error, Reason :: term()}. dirty_read_s2s_list_pids(FromTo) -> try - ejabberd_s2s_backend:dirty_read_s2s_list_pids(FromTo) + mongoose_s2s_backend:dirty_read_s2s_list_pids(FromTo) catch Class:Reason:Stacktrace -> ?LOG_ERROR(#{what => s2s_dirty_read_s2s_list_failed, from_to => FromTo, @@ -551,18 +551,18 @@ dirty_read_s2s_list_pids(FromTo) -> %% Returns true if the connection is registered -spec call_try_register(Pid :: pid(), ShouldWriteF :: fun(), FromTo :: fromto()) -> boolean(). call_try_register(Pid, ShouldWriteF, FromTo) -> - ejabberd_s2s_backend:try_register(Pid, ShouldWriteF, FromTo). + mongoose_s2s_backend:try_register(Pid, ShouldWriteF, FromTo). call_node_cleanup(Node) -> - ejabberd_s2s_backend:node_cleanup(Node). + mongoose_s2s_backend:node_cleanup(Node). call_remove_connection(FromTo, Pid) -> - ejabberd_s2s_backend:remove_connection(FromTo, Pid). + mongoose_s2s_backend:remove_connection(FromTo, Pid). 
-spec get_shared_secret(mongooseim:host_type()) -> {ok, {secret_source(), base16_secret()}} | {error, not_found}. get_shared_secret(HostType) -> - ejabberd_s2s_backend:get_shared_secret(HostType). + mongoose_s2s_backend:get_shared_secret(HostType). -spec register_secret(mongooseim:host_type(), ejabberd_s2s:secret_source(), ejabberd_s2s:base16_secret()) -> ok. register_secret(HostType, Source, Secret) -> - ejabberd_s2s_backend:register_secret(HostType, Source, Secret). + mongoose_s2s_backend:register_secret(HostType, Source, Secret). diff --git a/src/ejabberd_s2s_backend.erl b/src/s2s/mongoose_s2s_backend.erl similarity index 95% rename from src/ejabberd_s2s_backend.erl rename to src/s2s/mongoose_s2s_backend.erl index 54489f85bfc..98110a3129d 100644 --- a/src/ejabberd_s2s_backend.erl +++ b/src/s2s/mongoose_s2s_backend.erl @@ -1,4 +1,4 @@ --module(ejabberd_s2s_backend). +-module(mongoose_s2s_backend). -callback init(map()) -> any(). @@ -16,7 +16,7 @@ -ignore_xref([behaviour_info/1]). --define(MAIN_MODULE, ejabberd_s2s). +-define(MAIN_MODULE, mongoose_s2s). -spec init(map()) -> any(). init(Opts) -> diff --git a/src/ejabberd_s2s_cets.erl b/src/s2s/mongoose_s2s_cets.erl similarity index 95% rename from src/ejabberd_s2s_cets.erl rename to src/s2s/mongoose_s2s_cets.erl index 48563a5a378..625c2118045 100644 --- a/src/ejabberd_s2s_cets.erl +++ b/src/s2s/mongoose_s2s_cets.erl @@ -1,4 +1,4 @@ --module(ejabberd_s2s_cets). +-module(mongoose_s2s_cets). -export([init/1, dirty_read_s2s_list_pids/1, try_register/3, @@ -26,7 +26,7 @@ dirty_read_s2s_list_pids(FromTo) -> {ok, Pids}. 
try_register(Pid, ShouldWriteF, FromTo) -> - L = dirty_read_s2s_list_pids(FromTo), + {ok, L} = dirty_read_s2s_list_pids(FromTo), case ShouldWriteF(L) of true -> cets:insert(?TABLE, {{FromTo, Pid}}), diff --git a/src/ejabberd_s2s_mnesia.erl b/src/s2s/mongoose_s2s_mnesia.erl similarity index 98% rename from src/ejabberd_s2s_mnesia.erl rename to src/s2s/mongoose_s2s_mnesia.erl index 4d22e42c32b..d997db960bb 100644 --- a/src/ejabberd_s2s_mnesia.erl +++ b/src/s2s/mongoose_s2s_mnesia.erl @@ -1,4 +1,4 @@ --module(ejabberd_s2s_mnesia). +-module(mongoose_s2s_mnesia). -export([init/1, dirty_read_s2s_list_pids/1, try_register/3, From 849c4930a167f39b4a3fc1c02a85c6b713d5c48f Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 29 Jun 2023 18:15:55 +0200 Subject: [PATCH 070/161] Fix xref (add missing callbacks into mongoose_s2s_backend) --- src/s2s/mongoose_s2s_backend.erl | 23 +++++++++++++++++++++-- src/s2s/mongoose_s2s_cets.erl | 2 ++ src/s2s/mongoose_s2s_mnesia.erl | 2 ++ 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/src/s2s/mongoose_s2s_backend.erl b/src/s2s/mongoose_s2s_backend.erl index 98110a3129d..f4f31ede136 100644 --- a/src/s2s/mongoose_s2s_backend.erl +++ b/src/s2s/mongoose_s2s_backend.erl @@ -1,9 +1,18 @@ -module(mongoose_s2s_backend). --callback init(map()) -> - any(). +-callback init(map()) -> any(). -callback dirty_read_s2s_list_pids(ejabberd_s2s:fromto()) -> {ok, [pid()]} | {error, Reason :: term()}. +-callback try_register(Pid :: pid(), + ShouldWriteF :: fun(), + FromTo :: ejabberd_s2s:fromto()) -> boolean(). +-callback remove_connection(FromTo :: ejabberd_s2s:fromto(), Pid :: pid()) -> ok. +-callback node_cleanup(Node :: node()) -> term(). +-callback register_secret(HostType :: mongooseim:host_type(), + Source :: ejabberd_s2s:secret_source(), + Secret :: ejabberd_s2s:base16_secret()) -> ok. +-callback get_shared_secret(mongooseim:host_type()) -> + {ok, {ejabberd_s2s:secret_source(), ejabberd_s2s:base16_secret()}} | {error, not_found}. 
-export([init/1, dirty_read_s2s_list_pids/1, @@ -29,17 +38,27 @@ init(Opts) -> dirty_read_s2s_list_pids(FromTo) -> mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [FromTo]). +-spec try_register(Pid :: pid(), + ShouldWriteF :: fun(), + FromTo :: ejabberd_s2s:fromto()) -> boolean(). try_register(Pid, ShouldWriteF, FromTo) -> mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [Pid, ShouldWriteF, FromTo]). +-spec remove_connection(FromTo :: ejabberd_s2s:fromto(), Pid :: pid()) -> ok. remove_connection(FromTo, Pid) -> mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [FromTo, Pid]). +-spec node_cleanup(Node :: node()) -> term(). node_cleanup(Node) -> mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [Node]). +-spec register_secret(HostType :: mongooseim:host_type(), + Source :: ejabberd_s2s:secret_source(), + Secret :: ejabberd_s2s:base16_secret()) -> ok. register_secret(HostType, Source, Secret) -> mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [HostType, Source, Secret]). +-spec get_shared_secret(mongooseim:host_type()) -> + {ok, {ejabberd_s2s:secret_source(), ejabberd_s2s:base16_secret()}} | {error, not_found}. get_shared_secret(HostType) -> mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [HostType]). diff --git a/src/s2s/mongoose_s2s_cets.erl b/src/s2s/mongoose_s2s_cets.erl index 625c2118045..204720042b0 100644 --- a/src/s2s/mongoose_s2s_cets.erl +++ b/src/s2s/mongoose_s2s_cets.erl @@ -1,4 +1,6 @@ -module(mongoose_s2s_cets). +-behaviour(mongoose_s2s_backend). + -export([init/1, dirty_read_s2s_list_pids/1, try_register/3, diff --git a/src/s2s/mongoose_s2s_mnesia.erl b/src/s2s/mongoose_s2s_mnesia.erl index d997db960bb..86ebf9c897e 100644 --- a/src/s2s/mongoose_s2s_mnesia.erl +++ b/src/s2s/mongoose_s2s_mnesia.erl @@ -1,4 +1,6 @@ -module(mongoose_s2s_mnesia). +-behaviour(mongoose_s2s_backend). 
+ -export([init/1, dirty_read_s2s_list_pids/1, try_register/3, From bc397d30df1715e8f28ba2d763862a9c5d3e8e19 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 29 Jun 2023 18:25:53 +0200 Subject: [PATCH 071/161] Fix config parser for s2s --- test/common/config_parser_helper.erl | 6 ++++++ test/config_parser_SUITE.erl | 7 +++++++ test/mongoose_cleanup_SUITE.erl | 1 + 3 files changed, 14 insertions(+) diff --git a/test/common/config_parser_helper.erl b/test/common/config_parser_helper.erl index f12f4ad1ef5..527eba36bbc 100644 --- a/test/common/config_parser_helper.erl +++ b/test/common/config_parser_helper.erl @@ -28,6 +28,7 @@ options("host_types") -> event_max_age => 5000})}}, {sm_backend, mnesia}, {component_backend, mnesia}, + {s2s_backend, mnesia}, {{s2s, <<"another host type">>}, default_s2s()}, {{s2s, <<"localhost">>}, default_s2s()}, {{s2s, <<"some host type">>}, default_s2s()}, @@ -99,6 +100,7 @@ options("miscellaneous") -> {{s2s, <<"localhost">>}, default_s2s()}, {sm_backend, mnesia}, {component_backend, mnesia}, + {s2s_backend, mnesia}, {{auth, <<"anonymous.localhost">>}, custom_auth()}, {{auth, <<"localhost">>}, custom_auth()}, {{modules, <<"anonymous.localhost">>}, #{}}, @@ -127,6 +129,7 @@ options("modules") -> {{s2s, <<"localhost">>}, default_s2s()}, {sm_backend, mnesia}, {component_backend, mnesia}, + {s2s_backend, mnesia}, {{auth, <<"dummy_host">>}, default_auth()}, {{auth, <<"localhost">>}, default_auth()}, {{modules, <<"dummy_host">>}, all_modules()}, @@ -265,6 +268,7 @@ options("mongooseim-pgsql") -> periodic_report => 10800000}}}, {sm_backend, mnesia}, {component_backend, mnesia}, + {s2s_backend, mnesia}, {{auth, <<"anonymous.localhost">>}, (default_auth())#{anonymous => #{allow_multiple_connections => true, protocol => both}, @@ -361,6 +365,7 @@ options("outgoing_pools") -> {{s2s, <<"localhost.bis">>}, default_s2s()}, {sm_backend, mnesia}, {component_backend, mnesia}, + {s2s_backend, mnesia}, {{auth, <<"anonymous.localhost">>}, 
default_auth()}, {{auth, <<"localhost">>}, default_auth()}, {{auth, <<"localhost.bis">>}, default_auth()}, @@ -388,6 +393,7 @@ options("s2s_only") -> {services, #{}}, {sm_backend, mnesia}, {component_backend, mnesia}, + {s2s_backend, mnesia}, {{auth, <<"dummy_host">>}, default_auth()}, {{auth, <<"localhost">>}, default_auth()}, {{modules, <<"dummy_host">>}, #{}}, diff --git a/test/config_parser_SUITE.erl b/test/config_parser_SUITE.erl index 6ff4e43f46d..afef8b760c4 100644 --- a/test/config_parser_SUITE.erl +++ b/test/config_parser_SUITE.erl @@ -76,6 +76,7 @@ groups() -> all_metrics_are_global, sm_backend, component_backend, + s2s_backend, max_fsm_queue, http_server_name, rdbms_server_type, @@ -390,6 +391,12 @@ component_backend(_Config) -> ?cfg(component_backend, cets, #{<<"general">> => #{<<"component_backend">> => <<"cets">>}}), ?err(#{<<"general">> => #{<<"component_backend">> => <<"amnesia">>}}). +s2s_backend(_Config) -> + ?cfg(s2s_backend, mnesia, #{}), % default + ?cfg(s2s_backend, mnesia, #{<<"general">> => #{<<"s2s_backend">> => <<"mnesia">>}}), + ?err(#{<<"general">> => #{<<"s2s_backend">> => <<"redis">>}}), + ?err(#{<<"general">> => #{<<"s2s_backend">> => <<"amnesia">>}}). + max_fsm_queue(_Config) -> ?cfg(max_fsm_queue, 100, #{<<"general">> => #{<<"max_fsm_queue">> => 100}}), ?err(#{<<"general">> => #{<<"max_fsm_queue">> => -10}}). diff --git a/test/mongoose_cleanup_SUITE.erl b/test/mongoose_cleanup_SUITE.erl index 428b95b2178..739f87990ef 100644 --- a/test/mongoose_cleanup_SUITE.erl +++ b/test/mongoose_cleanup_SUITE.erl @@ -108,6 +108,7 @@ opts() -> [{hosts, [?HOST]}, {host_types, []}, {all_metrics_are_global, false}, + {s2s_backend, mnesia}, {{modules, ?HOST}, #{}}]. 
meck_mods(bosh) -> [exometer, mod_bosh_socket]; From 72c7a8cce634788d31b72b60d4ba9f1ab3fa5707 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 29 Jun 2023 20:13:13 +0200 Subject: [PATCH 072/161] Make mongoose_listener_sup:start_child more verbose --- src/mongoose_listener_sup.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/mongoose_listener_sup.erl b/src/mongoose_listener_sup.erl index 9191cdea91b..8628b5124b3 100644 --- a/src/mongoose_listener_sup.erl +++ b/src/mongoose_listener_sup.erl @@ -16,7 +16,8 @@ start_link() -> -spec start_child(supervisor:child_spec()) -> ok. start_child(ChildSpec) -> - {ok, _Pid} = supervisor:start_child(?MODULE, ChildSpec), + Res = supervisor:start_child(?MODULE, ChildSpec), + check_start_child_result(Res, ChildSpec), ok. %% Supervisor callbacks @@ -26,3 +27,5 @@ init([]) -> {ok, {#{strategy => one_for_one, intensity => 10, period => 1}, []}}. + +check_start_child_result({ok, _Pid}, _ChildSpec) -> ok. From 6dd4d432cfd3cede5fa1ef026b11e3762cbb18e7 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 29 Jun 2023 20:13:25 +0200 Subject: [PATCH 073/161] Fix mongoose_config_SUITE --- test/mongoose_config_SUITE.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/test/mongoose_config_SUITE.erl b/test/mongoose_config_SUITE.erl index 61e228607de..4716c793996 100644 --- a/test/mongoose_config_SUITE.erl +++ b/test/mongoose_config_SUITE.erl @@ -190,6 +190,7 @@ minimal_config_opts() -> {services, #{}}, {sm_backend, mnesia}, {component_backend, mnesia}, + {s2s_backend, mnesia}, {{auth, <<"localhost">>}, config_parser_helper:default_auth()}, {{modules, <<"localhost">>}, #{}}, {{replaced_wait_timeout, <<"localhost">>}, 2000}, From cf6a87fdd2df9fa76bacb8dc993ff2b9fa7e00c0 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 5 Jul 2023 10:32:00 +0200 Subject: [PATCH 074/161] Remove ejabberd_s2s:have_connection/1 --- src/ejabberd_s2s.erl | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git 
a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index 10e89665f05..846f1a87645 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -35,7 +35,6 @@ -export([start_link/0, filter/4, route/4, - have_connection/1, key/3, get_connections_pids/1, try_register/1, @@ -57,7 +56,7 @@ %% ejabberd API -export([get_info_s2s_connections/1]). --ignore_xref([get_info_s2s_connections/1, have_connection/1, +-ignore_xref([get_info_s2s_connections/1, incoming_s2s_number/0, outgoing_s2s_number/0, start_link/0]). -include("mongoose.hrl"). @@ -103,9 +102,6 @@ remove_connection(FromTo, Pid) -> end, ok. -have_connection(FromTo) -> - get_connections_pids(FromTo) =/= []. - -spec get_connections_pids(_) -> [pid()]. get_connections_pids(FromTo) -> case dirty_read_s2s_list_pids(FromTo) of From 3c6c9887bfee12624347abbe8f1f8ba01edd7ae9 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 6 Jul 2023 13:13:17 +0200 Subject: [PATCH 075/161] Rename dirty_read_s2s_list_pids to get_s2s_out_pids --- src/ejabberd_s2s.erl | 11 ++++++----- src/s2s/mongoose_s2s_backend.erl | 9 +++++---- src/s2s/mongoose_s2s_cets.erl | 6 +++--- src/s2s/mongoose_s2s_mnesia.erl | 4 ++-- 4 files changed, 16 insertions(+), 14 deletions(-) diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index 846f1a87645..4b7bb747236 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -104,7 +104,7 @@ remove_connection(FromTo, Pid) -> -spec get_connections_pids(_) -> [pid()]. 
get_connections_pids(FromTo) -> - case dirty_read_s2s_list_pids(FromTo) of + case get_s2s_out_pids(FromTo) of {ok, L} when is_list(L) -> L; {error, _} -> @@ -231,7 +231,7 @@ find_connection(From, To, Retries) -> MaxS2SConnectionsNumberPerNode = max_s2s_connections_number_per_node(FromTo), ?LOG_DEBUG(#{what => s2s_find_connection, from_server => MyServer, to_server => Server}), - case dirty_read_s2s_list_pids(FromTo) of + case get_s2s_out_pids(FromTo) of {error, Reason} -> {error, Reason}; {ok, []} -> @@ -532,10 +532,11 @@ db_init() -> Backend = mongoose_config:get_opt(s2s_backend), mongoose_s2s_backend:init(#{backend => Backend}). --spec dirty_read_s2s_list_pids(FromTo :: fromto()) -> {ok, [pid()]} | {error, Reason :: term()}. -dirty_read_s2s_list_pids(FromTo) -> +%% Get ejabberd_s2s_out pids +-spec get_s2s_out_pids(FromTo :: fromto()) -> {ok, [pid()]} | {error, Reason :: term()}. +get_s2s_out_pids(FromTo) -> try - mongoose_s2s_backend:dirty_read_s2s_list_pids(FromTo) + mongoose_s2s_backend:get_s2s_out_pids(FromTo) catch Class:Reason:Stacktrace -> ?LOG_ERROR(#{what => s2s_dirty_read_s2s_list_failed, from_to => FromTo, diff --git a/src/s2s/mongoose_s2s_backend.erl b/src/s2s/mongoose_s2s_backend.erl index f4f31ede136..6cb1cac5ef6 100644 --- a/src/s2s/mongoose_s2s_backend.erl +++ b/src/s2s/mongoose_s2s_backend.erl @@ -1,7 +1,7 @@ -module(mongoose_s2s_backend). -callback init(map()) -> any(). --callback dirty_read_s2s_list_pids(ejabberd_s2s:fromto()) -> +-callback get_s2s_out_pids(ejabberd_s2s:fromto()) -> {ok, [pid()]} | {error, Reason :: term()}. -callback try_register(Pid :: pid(), ShouldWriteF :: fun(), @@ -15,7 +15,7 @@ {ok, {ejabberd_s2s:secret_source(), ejabberd_s2s:base16_secret()}} | {error, not_found}. -export([init/1, - dirty_read_s2s_list_pids/1, + get_s2s_out_pids/1, try_register/3, remove_connection/2, node_cleanup/1]). 
@@ -33,11 +33,12 @@ init(Opts) -> mongoose_backend:init(global, ?MAIN_MODULE, [], Opts), mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). --spec dirty_read_s2s_list_pids(ejabberd_s2s:fromto()) -> +-spec get_s2s_out_pids(ejabberd_s2s:fromto()) -> {ok, [pid()]} | {error, Reason :: term()}. -dirty_read_s2s_list_pids(FromTo) -> +get_s2s_out_pids(FromTo) -> mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [FromTo]). +%% Register ejabberd_s2s_out connection -spec try_register(Pid :: pid(), ShouldWriteF :: fun(), FromTo :: ejabberd_s2s:fromto()) -> boolean(). diff --git a/src/s2s/mongoose_s2s_cets.erl b/src/s2s/mongoose_s2s_cets.erl index 204720042b0..1ad7688ce80 100644 --- a/src/s2s/mongoose_s2s_cets.erl +++ b/src/s2s/mongoose_s2s_cets.erl @@ -2,7 +2,7 @@ -behaviour(mongoose_s2s_backend). -export([init/1, - dirty_read_s2s_list_pids/1, + get_s2s_out_pids/1, try_register/3, remove_connection/2, node_cleanup/1]). @@ -22,13 +22,13 @@ init(_) -> cets_discovery:add_table(mongoose_cets_discovery, ?SECRET_TABLE). %% Pid lists -dirty_read_s2s_list_pids(FromTo) -> +get_s2s_out_pids(FromTo) -> R = {{FromTo, '$1'}}, Pids = ets:select(?TABLE, [{R, [], ['$1']}]), {ok, Pids}. try_register(Pid, ShouldWriteF, FromTo) -> - {ok, L} = dirty_read_s2s_list_pids(FromTo), + {ok, L} = get_s2s_out_pids(FromTo), case ShouldWriteF(L) of true -> cets:insert(?TABLE, {{FromTo, Pid}}), diff --git a/src/s2s/mongoose_s2s_mnesia.erl b/src/s2s/mongoose_s2s_mnesia.erl index 86ebf9c897e..e7d10e0a111 100644 --- a/src/s2s/mongoose_s2s_mnesia.erl +++ b/src/s2s/mongoose_s2s_mnesia.erl @@ -2,7 +2,7 @@ -behaviour(mongoose_s2s_backend). -export([init/1, - dirty_read_s2s_list_pids/1, + get_s2s_out_pids/1, try_register/3, remove_connection/2, node_cleanup/1]). @@ -30,7 +30,7 @@ init_pids() -> mnesia:create_table(s2s, Opts), mnesia:add_table_copy(s2s, node(), ram_copies). -dirty_read_s2s_list_pids(FromTo) -> +get_s2s_out_pids(FromTo) -> {ok, s2s_to_pids(mnesia:dirty_read(s2s, FromTo))}. 
try_register(Pid, ShouldWriteF, FromTo) -> From 9cb1d12dd4a51e04204f69d7820476c412237c68 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 6 Jul 2023 13:18:16 +0200 Subject: [PATCH 076/161] Use get_s2s_out_pids function name --- src/ejabberd_s2s.erl | 29 +++++------------------------ src/ejabberd_s2s_out.erl | 2 +- src/s2s/mongoose_s2s_backend.erl | 6 ++---- src/s2s/mongoose_s2s_cets.erl | 5 ++--- src/s2s/mongoose_s2s_mnesia.erl | 4 ++-- test/mongoose_cleanup_SUITE.erl | 4 ++-- 6 files changed, 14 insertions(+), 36 deletions(-) diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index 4b7bb747236..026e9151621 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -36,7 +36,7 @@ filter/4, route/4, key/3, - get_connections_pids/1, + get_s2s_out_pids/1, try_register/1, remove_connection/2, find_connection/2, @@ -102,15 +102,6 @@ remove_connection(FromTo, Pid) -> end, ok. --spec get_connections_pids(_) -> [pid()]. -get_connections_pids(FromTo) -> - case get_s2s_out_pids(FromTo) of - {ok, L} when is_list(L) -> - L; - {error, _} -> - [] - end. - -spec try_register(fromto()) -> boolean(). try_register(FromTo) -> ShouldWriteF = should_write_f(FromTo), @@ -232,9 +223,7 @@ find_connection(From, To, Retries) -> max_s2s_connections_number_per_node(FromTo), ?LOG_DEBUG(#{what => s2s_find_connection, from_server => MyServer, to_server => Server}), case get_s2s_out_pids(FromTo) of - {error, Reason} -> - {error, Reason}; - {ok, []} -> + [] -> %% TODO too complex, and could cause issues on bursts. %% What would happen if connection is denied? %% Start a pool instead maybe? 
@@ -246,7 +235,7 @@ find_connection(From, To, Retries) -> maybe_open_several_connections(From, To, MyServer, Server, FromTo, MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode, Retries); - {ok, L} when is_list(L) -> + L when is_list(L) -> maybe_open_missing_connections(From, To, MyServer, Server, FromTo, MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode, L, Retries) @@ -533,17 +522,9 @@ db_init() -> mongoose_s2s_backend:init(#{backend => Backend}). %% Get ejabberd_s2s_out pids --spec get_s2s_out_pids(FromTo :: fromto()) -> {ok, [pid()]} | {error, Reason :: term()}. +-spec get_s2s_out_pids(FromTo :: fromto()) -> [pid()]. get_s2s_out_pids(FromTo) -> - try - mongoose_s2s_backend:get_s2s_out_pids(FromTo) - catch Class:Reason:Stacktrace -> - ?LOG_ERROR(#{what => s2s_dirty_read_s2s_list_failed, - from_to => FromTo, - class => Class, reason => Reason, - stacktrace => Stacktrace}), - {error, Reason} - end. + mongoose_s2s_backend:get_s2s_out_pids(FromTo). %% Returns true if the connection is registered -spec call_try_register(Pid :: pid(), ShouldWriteF :: fun(), FromTo :: fromto()) -> boolean(). diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index 94e053c7c07..1d8fe30eb83 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -1062,7 +1062,7 @@ get_max_retry_delay(HostType) -> %% @doc Terminate s2s_out connections that are in state wait_before_retry terminate_if_waiting_delay(From, To) -> FromTo = {From, To}, - Pids = ejabberd_s2s:get_connections_pids(FromTo), + Pids = ejabberd_s2s:get_s2s_out_pids(FromTo), lists:foreach( fun(Pid) -> Pid ! terminate_if_waiting_before_retry diff --git a/src/s2s/mongoose_s2s_backend.erl b/src/s2s/mongoose_s2s_backend.erl index 6cb1cac5ef6..9f352ec8435 100644 --- a/src/s2s/mongoose_s2s_backend.erl +++ b/src/s2s/mongoose_s2s_backend.erl @@ -1,8 +1,7 @@ -module(mongoose_s2s_backend). -callback init(map()) -> any(). 
--callback get_s2s_out_pids(ejabberd_s2s:fromto()) -> - {ok, [pid()]} | {error, Reason :: term()}. +-callback get_s2s_out_pids(ejabberd_s2s:fromto()) -> [pid()]. -callback try_register(Pid :: pid(), ShouldWriteF :: fun(), FromTo :: ejabberd_s2s:fromto()) -> boolean(). @@ -33,8 +32,7 @@ init(Opts) -> mongoose_backend:init(global, ?MAIN_MODULE, [], Opts), mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). --spec get_s2s_out_pids(ejabberd_s2s:fromto()) -> - {ok, [pid()]} | {error, Reason :: term()}. +-spec get_s2s_out_pids(ejabberd_s2s:fromto()) -> [pid()]. get_s2s_out_pids(FromTo) -> mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [FromTo]). diff --git a/src/s2s/mongoose_s2s_cets.erl b/src/s2s/mongoose_s2s_cets.erl index 1ad7688ce80..c65c9359706 100644 --- a/src/s2s/mongoose_s2s_cets.erl +++ b/src/s2s/mongoose_s2s_cets.erl @@ -24,11 +24,10 @@ init(_) -> %% Pid lists get_s2s_out_pids(FromTo) -> R = {{FromTo, '$1'}}, - Pids = ets:select(?TABLE, [{R, [], ['$1']}]), - {ok, Pids}. + ets:select(?TABLE, [{R, [], ['$1']}]). try_register(Pid, ShouldWriteF, FromTo) -> - {ok, L} = get_s2s_out_pids(FromTo), + L = get_s2s_out_pids(FromTo), case ShouldWriteF(L) of true -> cets:insert(?TABLE, {{FromTo, Pid}}), diff --git a/src/s2s/mongoose_s2s_mnesia.erl b/src/s2s/mongoose_s2s_mnesia.erl index e7d10e0a111..2b1b02fc545 100644 --- a/src/s2s/mongoose_s2s_mnesia.erl +++ b/src/s2s/mongoose_s2s_mnesia.erl @@ -31,11 +31,11 @@ init_pids() -> mnesia:add_table_copy(s2s, node(), ram_copies). get_s2s_out_pids(FromTo) -> - {ok, s2s_to_pids(mnesia:dirty_read(s2s, FromTo))}. + s2s_to_pids(mnesia:dirty_read(s2s, FromTo)). 
try_register(Pid, ShouldWriteF, FromTo) -> F = fun() -> - L = s2s_to_pids(mnesia:read({s2s, FromTo})), + L = get_s2s_out_pids(FromTo), case ShouldWriteF(L) of true -> mnesia:write(#s2s{fromto = FromTo, pid = Pid}), diff --git a/test/mongoose_cleanup_SUITE.erl b/test/mongoose_cleanup_SUITE.erl index 739f87990ef..acf513b4791 100644 --- a/test/mongoose_cleanup_SUITE.erl +++ b/test/mongoose_cleanup_SUITE.erl @@ -187,9 +187,9 @@ s2s(_Config) -> FromTo = {?HOST, <<"foreign">>}, ejabberd_s2s:try_register(FromTo), Self = self(), - [Self] = ejabberd_s2s:get_connections_pids(FromTo), + [Self] = ejabberd_s2s:get_s2s_out_pids(FromTo), mongoose_hooks:node_cleanup(node()), - [] = ejabberd_s2s:get_connections_pids(FromTo). + [] = ejabberd_s2s:get_s2s_out_pids(FromTo). bosh(_Config) -> {started, ok} = start(?HOST, mod_bosh), From 7ce404ed31d53c1f2c317e7d6c1734741c5e9591 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 6 Jul 2023 13:38:37 +0200 Subject: [PATCH 077/161] Pass FromTo into ejabberd_s2s_out as one argument Use FromTo instead of 3 arguments (MyServer, Server, FromTo) --- src/ejabberd_s2s.erl | 52 +++++++++++++++++++--------------------- src/ejabberd_s2s_in.erl | 2 +- src/ejabberd_s2s_out.erl | 25 +++++++++---------- 3 files changed, 37 insertions(+), 42 deletions(-) diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index 026e9151621..dbfcd28cc57 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -232,34 +232,38 @@ find_connection(From, To, Retries) -> %% We try to establish all the connections if the host is not a %% service and if the s2s host is not blacklisted or %% is in whitelist: - maybe_open_several_connections(From, To, MyServer, Server, FromTo, + maybe_open_several_connections(From, To, FromTo, MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode, Retries); L when is_list(L) -> - maybe_open_missing_connections(From, To, MyServer, Server, FromTo, + maybe_open_missing_connections(From, To, FromTo, MaxS2SConnectionsNumber, 
MaxS2SConnectionsNumberPerNode, L, Retries) end. -maybe_open_several_connections(From, To, MyServer, Server, FromTo, +%% Checks: +%% - if the host is not a service +%% - and if the s2s host is not blacklisted or is in whitelist +-spec is_s2s_allowed_for_host(fromto()) -> boolean(). +is_s2s_allowed_for_host({FromServer, ToServer} = FromTo) -> + not is_service(FromTo) andalso allow_host(FromServer, ToServer). + +maybe_open_several_connections(From, To, FromTo, MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode, Retries) -> - %% We try to establish all the connections if the host is not a - %% service and if the s2s host is not blacklisted or - %% is in whitelist: - case not is_service(From, To) andalso allow_host(MyServer, Server) of + %% We try to establish all the connections + case is_s2s_allowed_for_host(FromTo) of true -> NeededConnections = needed_connections_number( [], MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode), - open_several_connections( - NeededConnections, MyServer, Server, FromTo), + open_several_connections(NeededConnections, FromTo), find_connection(From, To, Retries - 1); false -> {error, not_allowed} end. -maybe_open_missing_connections(From, To, MyServer, Server, FromTo, +maybe_open_missing_connections(From, To, FromTo, MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode, L, Retries) -> NeededConnections = needed_connections_number( @@ -268,9 +272,7 @@ maybe_open_missing_connections(From, To, MyServer, Server, FromTo, case NeededConnections > 0 of true -> %% We establish the missing connections for this pair. - open_several_connections( - NeededConnections, MyServer, - Server, FromTo), + open_several_connections(NeededConnections, FromTo), find_connection(From, To, Retries - 1); false -> %% We choose a connexion from the pool of opened ones. @@ -290,18 +292,16 @@ choose_pid(From, Pids) -> ?LOG_DEBUG(#{what => s2s_choose_pid, from => From, s2s_pid => Pid}), Pid. 
--spec open_several_connections(N :: pos_integer(), MyServer :: jid:server(), - Server :: jid:server(), FromTo :: fromto()) -> ok. -open_several_connections(N, MyServer, Server, FromTo) -> +-spec open_several_connections(N :: pos_integer(), FromTo :: fromto()) -> ok. +open_several_connections(N, FromTo) -> ShouldWriteF = should_write_f(FromTo), - [new_connection(MyServer, Server, FromTo, ShouldWriteF) + [new_connection(FromTo, ShouldWriteF) || _N <- lists:seq(1, N)], ok. --spec new_connection(MyServer :: jid:server(), Server :: jid:server(), - FromTo :: fromto(), ShouldWriteF :: fun()) -> ok. -new_connection(MyServer, Server, FromTo, ShouldWriteF) -> - {ok, Pid} = ejabberd_s2s_out:start(MyServer, Server, new), +-spec new_connection(FromTo :: fromto(), ShouldWriteF :: fun()) -> ok. +new_connection(FromTo, ShouldWriteF) -> + {ok, Pid} = ejabberd_s2s_out:start(FromTo, new), case call_try_register(Pid, ShouldWriteF, FromTo) of true -> log_new_connection_result(Pid, FromTo), @@ -354,20 +354,18 @@ should_write_f(FromTo) -> end. %%-------------------------------------------------------------------- -%% Function: is_service(From, To) -> true | false %% Description: Return true if the destination must be considered as a %% service. %% -------------------------------------------------------------------- --spec is_service(jid:jid(), jid:jid()) -> boolean(). -is_service(From, To) -> - LFromDomain = From#jid.lserver, - case mongoose_config:lookup_opt({route_subdomains, LFromDomain}) of +-spec is_service(fromto()) -> boolean(). +is_service({FromServer, ToServer} = _FromTo) -> + case mongoose_config:lookup_opt({route_subdomains, FromServer}) of {ok, s2s} -> % bypass RFC 3920 10.3 false; {error, not_found} -> Hosts = ?MYHOSTS, P = fun(ParentDomain) -> lists:member(ParentDomain, Hosts) end, - lists:any(P, parent_domains(To#jid.lserver)) + lists:any(P, parent_domains(ToServer)) end. -spec parent_domains(binary()) -> [binary(), ...]. 
diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index d611a363236..6dfeec6ddae 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -316,7 +316,7 @@ stream_established({xmlstreamelement, El}, StateData) -> orelse mongoose_component:has_component(LTo)} of {true, true} -> ejabberd_s2s_out:terminate_if_waiting_delay(LTo, LFrom), - ejabberd_s2s_out:start(LTo, LFrom, + ejabberd_s2s_out:start({LTo, LFrom}, %% From and To are switched {verify, self(), Key, StateData#state.streamid}), Conns = dict:store({LFrom, LTo}, wait_for_verification, diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index 1d8fe30eb83..34e903af936 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -31,8 +31,8 @@ -xep([{xep, 220}, {version, "1.1.1"}]). %% External exports --export([start/3, - start_link/3, +-export([start/2, + start_link/2, start_connection/1, terminate_if_waiting_delay/2, stop_connection/1]). @@ -56,7 +56,7 @@ code_change/4]). -ignore_xref([open_socket/2, print_state/1, - reopen_socket/2, start_link/3, stream_established/2, + reopen_socket/2, start_link/2, stream_established/2, wait_before_retry/2, wait_for_auth_result/2, wait_for_features/2, wait_for_starttls_proceed/2, wait_for_stream/2, wait_for_stream/2, wait_for_validation/2]). @@ -110,9 +110,6 @@ -define(FSMOPTS, []). -endif. --define(SUPERVISOR_START, supervisor:start_child(ejabberd_s2s_out_sup, - [From, Host, Type])). - -define(FSMTIMEOUT, 30000). %% We do not block on send anymore. @@ -147,14 +144,14 @@ %%%---------------------------------------------------------------------- %%% API %%%---------------------------------------------------------------------- --spec start(_, _, _) -> {'error', _} | {'ok', 'undefined' | pid()} | {'ok', 'undefined' | pid(), _}. -start(From, Host, Type) -> - ?SUPERVISOR_START. +-spec start(ejabberd_s2s:fromto(), _) -> {'error', _} | {'ok', 'undefined' | pid()} | {'ok', 'undefined' | pid(), _}. 
+start(FromTo, Type) -> + supervisor:start_child(ejabberd_s2s_out_sup, [FromTo, Type]). --spec start_link(_, _, _) -> 'ignore' | {'error', _} | {'ok', pid()}. -start_link(From, Host, Type) -> - p1_fsm:start_link(ejabberd_s2s_out, [From, Host, Type], +-spec start_link(ejabberd_s2s:fromto(), _) -> 'ignore' | {'error', _} | {'ok', pid()}. +start_link(FromTo, Type) -> + p1_fsm:start_link(ejabberd_s2s_out, [FromTo, Type], fsm_limit_opts() ++ ?FSMOPTS). @@ -176,8 +173,8 @@ stop_connection(Pid) -> %% ignore | %% {stop, StopReason} %%---------------------------------------------------------------------- --spec init([any(), ...]) -> {'ok', 'open_socket', state()}. -init([From, Server, Type]) -> +-spec init(list()) -> {'ok', 'open_socket', state()}. +init([{From, Server} = _FromTo, Type]) -> process_flag(trap_exit, true), ?LOG_DEBUG(#{what => s2s_out_started, text => <<"New outgoing s2s connection">>, From cd711da3bc70d119a95b26b8f2dd86837ac6cf8d Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 6 Jul 2023 14:35:47 +0200 Subject: [PATCH 078/161] Shorter names for max_connection variables and functions --- src/ejabberd_s2s.erl | 118 ++++++++++++-------------------- src/ejabberd_s2s_in.erl | 9 ++- src/s2s/mongoose_s2s_mnesia.erl | 10 ++- 3 files changed, 59 insertions(+), 78 deletions(-) diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index dbfcd28cc57..5eb9677e935 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -63,8 +63,8 @@ -include("jlib.hrl"). -include("ejabberd_commands.hrl"). --define(DEFAULT_MAX_S2S_CONNECTIONS_NUMBER, 1). --define(DEFAULT_MAX_S2S_CONNECTIONS_NUMBER_PER_NODE, 1). +-define(DEFAULT_MAX_S2S_CONNECTIONS, 1). +-define(DEFAULT_MAX_S2S_CONNECTIONS_PER_NODE, 1). -type fromto() :: {'global' | jid:server(), jid:server()}. -record(state, {}). @@ -90,18 +90,6 @@ filter(From, To, Acc, Packet) -> route(From, To, Acc, Packet) -> do_route(From, To, Acc, Packet). --spec remove_connection(_, pid()) -> ok. 
-remove_connection(FromTo, Pid) -> - try - call_remove_connection(FromTo, Pid) - catch Class:Reason:Stacktrace -> - ?LOG_ERROR(#{what => s2s_remove_connection_failed, - from_to => FromTo, s2s_pid => Pid, - class => Class, reason => Reason, - stacktrace => Stacktrace}) - end, - ok. - -spec try_register(fromto()) -> boolean(). try_register(FromTo) -> ShouldWriteF = should_write_f(FromTo), @@ -110,10 +98,7 @@ try_register(FromTo) -> true -> true; false -> - {FromServer, ToServer} = FromTo, - ?LOG_ERROR(#{what => s2s_register_failed, - from_server => FromServer, - to_server => ToServer}), + ?LOG_ERROR(#{what => s2s_register_failed, from_to => FromTo}), false end. @@ -126,8 +111,7 @@ node_cleanup(Acc, #{node := Node}, _) -> Res = call_node_cleanup(Node), {ok, maps:put(?MODULE, Res, Acc)}. --spec key(mongooseim:host_type(), {jid:lserver(), jid:lserver()}, binary()) -> - binary(). +-spec key(mongooseim:host_type(), fromto(), binary()) -> binary(). key(HostType, {From, To}, StreamID) -> {ok, {_, Secret}} = get_shared_secret(HostType), SecretHashed = base16:encode(crypto:hash(sha256, Secret)), @@ -218,10 +202,9 @@ find_connection(From, To, Retries) -> #jid{lserver = MyServer} = From, #jid{lserver = Server} = To, FromTo = {MyServer, Server}, - MaxS2SConnectionsNumber = max_s2s_connections_number(FromTo), - MaxS2SConnectionsNumberPerNode = - max_s2s_connections_number_per_node(FromTo), - ?LOG_DEBUG(#{what => s2s_find_connection, from_server => MyServer, to_server => Server}), + MaxConnections = max_s2s_connections(FromTo), + MaxConnectionsPerNode = max_s2s_connections_per_node(FromTo), + ?LOG_DEBUG(#{what => s2s_find_connection, from_to => FromTo}), case get_s2s_out_pids(FromTo) of [] -> %% TODO too complex, and could cause issues on bursts. 
@@ -233,12 +216,12 @@ find_connection(From, To, Retries) -> %% service and if the s2s host is not blacklisted or %% is in whitelist: maybe_open_several_connections(From, To, FromTo, - MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode, Retries); + MaxConnections, + MaxConnectionsPerNode, Retries); L when is_list(L) -> maybe_open_missing_connections(From, To, FromTo, - MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode, L, Retries) + MaxConnections, + MaxConnectionsPerNode, L, Retries) end. %% Checks: @@ -249,14 +232,12 @@ is_s2s_allowed_for_host({FromServer, ToServer} = FromTo) -> not is_service(FromTo) andalso allow_host(FromServer, ToServer). maybe_open_several_connections(From, To, FromTo, - MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode, Retries) -> + MaxConnections, MaxConnectionsPerNode, Retries) -> %% We try to establish all the connections case is_s2s_allowed_for_host(FromTo) of true -> NeededConnections = needed_connections_number( - [], MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode), + [], MaxConnections, MaxConnectionsPerNode), open_several_connections(NeededConnections, FromTo), find_connection(From, To, Retries - 1); false -> @@ -264,11 +245,9 @@ maybe_open_several_connections(From, To, FromTo, end. maybe_open_missing_connections(From, To, FromTo, - MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode, L, Retries) -> + MaxConnections, MaxConnectionsPerNode, L, Retries) -> NeededConnections = needed_connections_number( - L, MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode), + L, MaxConnections, MaxConnectionsPerNode), case NeededConnections > 0 of true -> %% We establish the missing connections for this pair. @@ -295,62 +274,54 @@ choose_pid(From, Pids) -> -spec open_several_connections(N :: pos_integer(), FromTo :: fromto()) -> ok. 
open_several_connections(N, FromTo) -> ShouldWriteF = should_write_f(FromTo), - [new_connection(FromTo, ShouldWriteF) - || _N <- lists:seq(1, N)], + [new_connection(FromTo, ShouldWriteF) || _N <- lists:seq(1, N)], ok. -spec new_connection(FromTo :: fromto(), ShouldWriteF :: fun()) -> ok. new_connection(FromTo, ShouldWriteF) -> + %% Serialize opening of connections {ok, Pid} = ejabberd_s2s_out:start(FromTo, new), case call_try_register(Pid, ShouldWriteF, FromTo) of true -> - log_new_connection_result(Pid, FromTo), + ?LOG_INFO(#{what => s2s_new_connection, + text => <<"New s2s connection started">>, + from_to => FromTo, s2s_pid => Pid}), ejabberd_s2s_out:start_connection(Pid); false -> ejabberd_s2s_out:stop_connection(Pid) end, ok. -log_new_connection_result(Pid, FromTo) -> - {FromServer, ToServer} = FromTo, - ?LOG_INFO(#{what => s2s_new_connection, - text => <<"New s2s connection started">>, - from_server => FromServer, - to_server => ToServer, - s2s_pid => Pid}). - --spec max_s2s_connections_number(fromto()) -> pos_integer(). -max_s2s_connections_number({From, To}) -> - {ok, HostType} = mongoose_domain_api:get_host_type(From), - case acl:match_rule(HostType, max_s2s_connections, jid:make(<<"">>, To, <<"">>)) of - Max when is_integer(Max) -> Max; - _ -> ?DEFAULT_MAX_S2S_CONNECTIONS_NUMBER - end. - --spec max_s2s_connections_number_per_node(fromto()) -> pos_integer(). -max_s2s_connections_number_per_node({From, To}) -> - {ok, HostType} = mongoose_domain_api:get_host_type(From), - case acl:match_rule(HostType, max_s2s_connections_per_node, jid:make(<<"">>, To, <<"">>)) of - Max when is_integer(Max) -> Max; - _ -> ?DEFAULT_MAX_S2S_CONNECTIONS_NUMBER_PER_NODE +-spec max_s2s_connections(fromto()) -> pos_integer(). +max_s2s_connections(FromTo) -> + match_integer_acl_rule(FromTo, max_s2s_connections, + ?DEFAULT_MAX_S2S_CONNECTIONS). + +-spec max_s2s_connections_per_node(fromto()) -> pos_integer(). 
+max_s2s_connections_per_node(FromTo) -> + match_integer_acl_rule(FromTo, max_s2s_connections_per_node, + ?DEFAULT_MAX_S2S_CONNECTIONS_PER_NODE). + +-spec match_integer_acl_rule(fromto(), atom(), integer()) -> term(). +match_integer_acl_rule({FromServer, ToServer}, Rule, Default) -> + {ok, HostType} = mongoose_domain_api:get_host_type(FromServer), + ToServerJid = jid:make(<<>>, ToServer, <<>>), + case acl:match_rule(HostType, Rule, ToServerJid) of + Int when is_integer(Int) -> Int; + _ -> Default end. -spec needed_connections_number([pid()], pos_integer(), pos_integer()) -> integer(). -needed_connections_number(Ls, MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode) when is_list(Ls) -> +needed_connections_number(Ls, MaxConnections, MaxConnectionsPerNode) when is_list(Ls) -> LocalLs = [L || L <- Ls, node(L) == node()], - lists:min([MaxS2SConnectionsNumber - length(Ls), - MaxS2SConnectionsNumberPerNode - length(LocalLs)]). + lists:min([MaxConnections - length(Ls), + MaxConnectionsPerNode - length(LocalLs)]). should_write_f(FromTo) -> - MaxS2SConnectionsNumber = max_s2s_connections_number(FromTo), - MaxS2SConnectionsNumberPerNode = - max_s2s_connections_number_per_node(FromTo), + MaxConnections = max_s2s_connections(FromTo), + MaxConnectionsPerNode = max_s2s_connections_per_node(FromTo), fun(L) when is_list(L) -> - NeededConnections = needed_connections_number( - L, MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode), - NeededConnections > 0 + needed_connections_number(L, MaxConnections, MaxConnectionsPerNode) > 0 end. %%-------------------------------------------------------------------- @@ -532,7 +503,8 @@ call_try_register(Pid, ShouldWriteF, FromTo) -> call_node_cleanup(Node) -> mongoose_s2s_backend:node_cleanup(Node). -call_remove_connection(FromTo, Pid) -> +-spec remove_connection(fromto(), pid()) -> ok. +remove_connection(FromTo, Pid) -> mongoose_s2s_backend:remove_connection(FromTo, Pid). 
-spec get_shared_secret(mongooseim:host_type()) -> {ok, {secret_source(), base16_secret()}} | {error, not_found}. diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index 6dfeec6ddae..74e54110c9f 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -207,10 +207,13 @@ start_stream(#{<<"version">> := <<"1.0">>, <<"from">> := RemoteServer}, cert_error => CertError}), Res = stream_start_error(StateData, mongoose_xmpp_errors:policy_violation(?MYLANG, CertError)), + %% Why would we close outgoing connections if the incoming connection fails auth? + %% That incoming connection could be from a hacker, and it would result in kicking + %% our outgoing connections. %% FIXME: why do we want stop just one of the connections here? - {ok, Pid} = ejabberd_s2s:find_connection(jid:make(<<>>, Server, <<>>), - jid:make(<<>>, RemoteServer, <<>>)), - ejabberd_s2s_out:stop_connection(Pid), +% {ok, Pid} = ejabberd_s2s:find_connection(jid:make(<<>>, Server, <<>>), +% jid:make(<<>>, RemoteServer, <<>>)), +% ejabberd_s2s_out:stop_connection(Pid), Res; _ -> send_text(StateData, ?STREAM_HEADER(<<" version='1.0'">>)), diff --git a/src/s2s/mongoose_s2s_mnesia.erl b/src/s2s/mongoose_s2s_mnesia.erl index 2b1b02fc545..1fdf5f09340 100644 --- a/src/s2s/mongoose_s2s_mnesia.erl +++ b/src/s2s/mongoose_s2s_mnesia.erl @@ -59,8 +59,14 @@ remove_connection(FromTo, Pid) -> F = fun() -> mnesia:delete_object(Rec) end, - {atomic, _} = mnesia:transaction(F), - ok. + case mnesia:transaction(F) of + {atomic, _} -> + ok; + Other -> + ?LOG_ERROR(#{what => s2s_remove_connection, + from_to => FromTo, reason => Other}), + ok + end. 
node_cleanup(Node) -> F = fun() -> From 1f7deb68a9e40fff4d9e4886cadf12359d33d909 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 6 Jul 2023 17:20:50 +0200 Subject: [PATCH 079/161] Only use fromto() in ejabberd_s2s_in --- src/ejabberd_s2s.erl | 152 ++++++++-------- src/ejabberd_s2s_in.erl | 302 ++++++++++++++----------------- src/ejabberd_s2s_out.erl | 22 ++- src/s2s/mongoose_s2s_backend.erl | 2 +- 4 files changed, 225 insertions(+), 253 deletions(-) diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index 5eb9677e935..083f7328e08 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -39,8 +39,7 @@ get_s2s_out_pids/1, try_register/1, remove_connection/2, - find_connection/2, - allow_host/2, + allow_host/1, domain_utf8_to_ascii/1, timeout/0, lookup_certfile/1 @@ -66,13 +65,19 @@ -define(DEFAULT_MAX_S2S_CONNECTIONS, 1). -define(DEFAULT_MAX_S2S_CONNECTIONS_PER_NODE, 1). --type fromto() :: {'global' | jid:server(), jid:server()}. +%% Pair of hosts {FromServer(), ToServer()}. +%% Used in a lot of API and backend functions. +-type fromto() :: {jid:server(), jid:server()}. + +%% Pids for ejabberd_s2s_out servers +-type s2s_pids() :: [pid()]. + -record(state, {}). -type secret_source() :: config | random. -type base16_secret() :: binary(). --export_type([fromto/0, secret_source/0, base16_secret/0]). +-export_type([fromto/0, s2s_pids/0, secret_source/0, base16_secret/0]). %%==================================================================== %% API @@ -191,79 +196,42 @@ do_route(From, To, Acc, Packet) -> end end. --spec find_connection(From :: jid:jid(), - To :: jid:jid()) -> {ok, pid()} | {error, Reason :: term()}. +-spec make_from_to(From :: jid:jid(), To :: jid:jid()) -> fromto(). +make_from_to(#jid{lserver = FromServer}, #jid{lserver = ToServer}) -> + {FromServer, ToServer}. + +-spec find_connection(From :: jid:jid(), To :: jid:jid()) -> + {ok, pid()} | {error, not_allowed}. find_connection(From, To) -> - find_connection(From, To, 3). 
- -find_connection(_From, _To, 0) -> - {error, retries_failed}; -find_connection(From, To, Retries) -> - #jid{lserver = MyServer} = From, - #jid{lserver = Server} = To, - FromTo = {MyServer, Server}, - MaxConnections = max_s2s_connections(FromTo), - MaxConnectionsPerNode = max_s2s_connections_per_node(FromTo), + FromTo = make_from_to(From, To), ?LOG_DEBUG(#{what => s2s_find_connection, from_to => FromTo}), - case get_s2s_out_pids(FromTo) of + OldCons = get_s2s_out_pids(FromTo), + NewCons = ensure_enough_connections(FromTo, OldCons), + case NewCons of [] -> - %% TODO too complex, and could cause issues on bursts. - %% What would happen if connection is denied? - %% Start a pool instead maybe? - %% When do we close the connection? - - %% We try to establish all the connections if the host is not a - %% service and if the s2s host is not blacklisted or - %% is in whitelist: - maybe_open_several_connections(From, To, FromTo, - MaxConnections, - MaxConnectionsPerNode, Retries); - L when is_list(L) -> - maybe_open_missing_connections(From, To, FromTo, - MaxConnections, - MaxConnectionsPerNode, L, Retries) + {error, not_allowed}; + _ -> + {ok, choose_pid(From, NewCons)} end. -%% Checks: -%% - if the host is not a service -%% - and if the s2s host is not blacklisted or is in whitelist --spec is_s2s_allowed_for_host(fromto()) -> boolean(). -is_s2s_allowed_for_host({FromServer, ToServer} = FromTo) -> - not is_service(FromTo) andalso allow_host(FromServer, ToServer). - -maybe_open_several_connections(From, To, FromTo, - MaxConnections, MaxConnectionsPerNode, Retries) -> - %% We try to establish all the connections - case is_s2s_allowed_for_host(FromTo) of - true -> - NeededConnections = needed_connections_number( - [], MaxConnections, MaxConnectionsPerNode), +%% Opens more connections if needed and allowed. 
+ensure_enough_connections(FromTo, OldCons) -> + NeededConnections = needed_connections_number_if_allowed(FromTo, OldCons), + case NeededConnections of + 0 -> + OldCons; + _ -> open_several_connections(NeededConnections, FromTo), - find_connection(From, To, Retries - 1); - false -> - {error, not_allowed} - end. - -maybe_open_missing_connections(From, To, FromTo, - MaxConnections, MaxConnectionsPerNode, L, Retries) -> - NeededConnections = needed_connections_number( - L, MaxConnections, MaxConnectionsPerNode), - case NeededConnections > 0 of - true -> - %% We establish the missing connections for this pair. - open_several_connections(NeededConnections, FromTo), - find_connection(From, To, Retries - 1); - false -> - %% We choose a connexion from the pool of opened ones. - {ok, choose_pid(From, L)} + %% Query for s2s pids one more time + get_s2s_out_pids(FromTo) end. %% Prefers the local connection (i.e. not on the remote node) --spec choose_pid(From :: jid:jid(), Pids :: [pid()]) -> pid(). -choose_pid(From, Pids) -> - Pids1 = case [P || P <- Pids, node(P) == node()] of +-spec choose_pid(From :: jid:jid(), Pids :: s2s_pids()) -> pid(). +choose_pid(From, [_|_] = Pids) -> + Pids1 = case filter_local_pids(Pids) of [] -> Pids; - Ps -> Ps + FilteredPids -> FilteredPids end, % Use sticky connections based on the JID of the sender % (without the resource to ensure that a muc room always uses the same connection) @@ -271,6 +239,12 @@ choose_pid(From, Pids) -> ?LOG_DEBUG(#{what => s2s_choose_pid, from => From, s2s_pid => Pid}), Pid. +%% Returns only pids from the current node. +-spec filter_local_pids(s2s_pids()) -> s2s_pids(). +filter_local_pids(Pids) -> + Node = node(), + [Pid || Pid <- Pids, node(Pid) == Node]. + -spec open_several_connections(N :: pos_integer(), FromTo :: fromto()) -> ok. open_several_connections(N, FromTo) -> ShouldWriteF = should_write_f(FromTo), @@ -311,17 +285,34 @@ match_integer_acl_rule({FromServer, ToServer}, Rule, Default) -> _ -> Default end. 
--spec needed_connections_number([pid()], pos_integer(), pos_integer()) -> integer(). -needed_connections_number(Ls, MaxConnections, MaxConnectionsPerNode) when is_list(Ls) -> - LocalLs = [L || L <- Ls, node(L) == node()], - lists:min([MaxConnections - length(Ls), - MaxConnectionsPerNode - length(LocalLs)]). +needed_connections_number_if_allowed(FromTo, OldCons) -> + case is_s2s_allowed_for_host(FromTo, OldCons) of + true -> + needed_extra_connections_number(FromTo, OldCons); + false -> + 0 + end. -should_write_f(FromTo) -> +%% Checks: +%% - if the host is not a service +%% - and if the s2s host is not blacklisted or is in whitelist +-spec is_s2s_allowed_for_host(fromto(), _OldConnections :: s2s_pids()) -> boolean(). +is_s2s_allowed_for_host(_FromTo, [_|_]) -> + true; %% Has outgoing connections established, skip the check +is_s2s_allowed_for_host(FromTo, []) -> + not is_service(FromTo) andalso allow_host(FromTo). + +-spec needed_extra_connections_number(fromto(), s2s_pids()) -> non_neg_integer(). +needed_extra_connections_number(FromTo, Connections) -> MaxConnections = max_s2s_connections(FromTo), MaxConnectionsPerNode = max_s2s_connections_per_node(FromTo), - fun(L) when is_list(L) -> - needed_connections_number(L, MaxConnections, MaxConnectionsPerNode) > 0 + LocalPids = filter_local_pids(Connections), + lists:min([MaxConnections - length(Connections), + MaxConnectionsPerNode - length(LocalPids)]). + +should_write_f(FromTo) -> + fun(Connections) when is_list(Connections) -> + needed_extra_connections_number(FromTo, Connections) > 0 end. %%-------------------------------------------------------------------- @@ -392,19 +383,20 @@ commands() -> ]. %% Check if host is in blacklist or white list -allow_host(MyServer, S2SHost) -> - case mongoose_domain_api:get_host_type(MyServer) of +-spec allow_host(fromto()) -> boolean(). 
+allow_host({FromServer, ToServer}) -> + case mongoose_domain_api:get_host_type(FromServer) of {error, not_found} -> false; {ok, HostType} -> - case mongoose_config:lookup_opt([{s2s, HostType}, host_policy, S2SHost]) of + case mongoose_config:lookup_opt([{s2s, HostType}, host_policy, ToServer]) of {ok, allow} -> true; {ok, deny} -> false; {error, not_found} -> mongoose_config:get_opt([{s2s, HostType}, default_policy]) =:= allow - andalso mongoose_hooks:s2s_allow_host(MyServer, S2SHost) =:= allow + andalso mongoose_hooks:s2s_allow_host(FromServer, ToServer) =:= allow end end. @@ -491,7 +483,7 @@ db_init() -> mongoose_s2s_backend:init(#{backend => Backend}). %% Get ejabberd_s2s_out pids --spec get_s2s_out_pids(FromTo :: fromto()) -> [pid()]. +-spec get_s2s_out_pids(FromTo :: fromto()) -> s2s_pids(). get_s2s_out_pids(FromTo) -> mongoose_s2s_backend:get_s2s_out_pids(FromTo). diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index 74e54110c9f..be6a698533f 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -65,7 +65,7 @@ host_type :: mongooseim:host_type() | undefined, authenticated = false :: boolean(), auth_domain :: binary() | undefined, - connections = dict:new(), + connections = #{} :: map(), timer :: reference() }). -type state() :: #state{}. @@ -205,16 +205,12 @@ start_stream(#{<<"version">> := <<"1.0">>, <<"from">> := RemoteServer}, remote_server => RemoteServer, reason => cert_error, cert_error => CertError}), - Res = stream_start_error(StateData, - mongoose_xmpp_errors:policy_violation(?MYLANG, CertError)), - %% Why would we close outgoing connections if the incoming connection fails auth? - %% That incoming connection could be from a hacker, and it would result in kicking - %% our outgoing connections. - %% FIXME: why do we want stop just one of the connections here? 
-% {ok, Pid} = ejabberd_s2s:find_connection(jid:make(<<>>, Server, <<>>), -% jid:make(<<>>, RemoteServer, <<>>)), -% ejabberd_s2s_out:stop_connection(Pid), - Res; + stream_start_error(StateData, + mongoose_xmpp_errors:policy_violation(?MYLANG, CertError)); + %% We were stopping ejabberd_s2s_out connection in the older version of the code + %% from this location. But stopping outgoing connections just because a non-verified + %% incoming connection fails is an abuse risk (a hacker could connect with an invalid + %% certificate, it should not cause stopping ejabberd_s2s_out connections). _ -> send_text(StateData, ?STREAM_HEADER(<<" version='1.0'">>)), send_element(StateData, @@ -306,29 +302,24 @@ tls_options_with_certfile(#state{host_type = HostType, tls_options = TLSOptions} stream_established({xmlstreamelement, El}, StateData) -> cancel_timer(StateData#state.timer), Timer = erlang:start_timer(ejabberd_s2s:timeout(), self(), []), - case is_key_packet(El) of - {key, To, From, Id, Key} -> + case parse_key_packet(El) of + %% We use LocalServer and RemoteServer instead of From and To to avoid confusion + {db_result, FromTo, Id, Key} -> ?LOG_DEBUG(#{what => s2s_in_get_key, - to => To, from => From, message_id => Id, key => Key}), - LTo = jid:nameprep(To), - LFrom = jid:nameprep(From), + from_to => FromTo, message_id => Id, key => Key}), %% Checks if the from domain is allowed and if the to %% domain is handled by this server: - case {ejabberd_s2s:allow_host(LTo, LFrom), - mongoose_router:is_registered_route(LTo) - orelse mongoose_component:has_component(LTo)} of + case {ejabberd_s2s:allow_host(FromTo), is_local_host_known(FromTo)} of {true, true} -> - ejabberd_s2s_out:terminate_if_waiting_delay(LTo, LFrom), - ejabberd_s2s_out:start({LTo, LFrom}, %% From and To are switched - {verify, self(), - Key, StateData#state.streamid}), - Conns = dict:store({LFrom, LTo}, wait_for_verification, - StateData#state.connections), - change_shaper(StateData, LTo, jid:make(<<>>, LFrom, 
<<>>)), + ejabberd_s2s_out:terminate_if_waiting_delay(FromTo), + StartType = {verify, self(), Key, StateData#state.streamid}, + ejabberd_s2s_out:start(FromTo, StartType), + Conns = maps:put(FromTo, wait_for_verification, + StateData#state.connections), + change_shaper(StateData, FromTo), {next_state, stream_established, - StateData#state{connections = Conns, - timer = Timer}}; + StateData#state{connections = Conns, timer = Timer}}; {_, false} -> send_element(StateData, mongoose_xmpp_errors:host_unknown()), ?LOG_WARNING(#{what => s2s_in_key_from_uknown_host}), @@ -338,61 +329,29 @@ stream_established({xmlstreamelement, El}, StateData) -> ?LOG_WARNING(#{what => s2s_in_key_with_invalid_from}), {stop, normal, StateData} end; - {verify, To, From, Id, Key} -> + {db_verify, FromTo, Id, Key} -> ?LOG_DEBUG(#{what => s2s_in_verify_key, - to => To, from => From, message_id => Id, key => Key}), - LTo = jid:nameprep(To), - LFrom = jid:nameprep(From), - Type = case ejabberd_s2s:key(StateData#state.host_type, {LTo, LFrom}, Id) of + from_to => FromTo, message_id => Id, key => Key}), + Type = case ejabberd_s2s:key(StateData#state.host_type, FromTo, Id) of Key -> <<"valid">>; _ -> <<"invalid">> end, - %% XEP-0185: Dialback Key Generation and Validation - %% DB means dial-back - send_element(StateData, - #xmlel{name = <<"db:verify">>, - attrs = [{<<"from">>, To}, - {<<"to">>, From}, - {<<"id">>, Id}, - {<<"type">>, Type}]}), + send_element(StateData, db_verify_xml(FromTo, Id, Type)), {next_state, stream_established, StateData#state{timer = Timer}}; - _ -> - NewEl = jlib:remove_attr(<<"xmlns">>, El), - #xmlel{attrs = Attrs} = NewEl, - FromS = xml:get_attr_s(<<"from">>, Attrs), - From = jid:from_binary(FromS), - ToS = xml:get_attr_s(<<"to">>, Attrs), - To = jid:from_binary(ToS), - case {From, To} of - {error, _} -> ok; - {_, error} -> ok; - _ -> route_incoming_stanza(From, To, NewEl, StateData) - end, + false -> + Res = parse_and_route_incoming_stanza(El, StateData), + 
handle_routing_result(Res, El, StateData), {next_state, stream_established, StateData#state{timer = Timer}} end; -stream_established({valid, From, To}, StateData) -> - send_element(StateData, - #xmlel{name = <<"db:result">>, - attrs = [{<<"from">>, To}, - {<<"to">>, From}, - {<<"type">>, <<"valid">>}]}), - LFrom = jid:nameprep(From), - LTo = jid:nameprep(To), - NSD = StateData#state{ - connections = dict:store({LFrom, LTo}, established, - StateData#state.connections)}, +stream_established({valid, FromTo}, StateData) -> + send_element(StateData, db_result_xml(FromTo, <<"valid">>)), + Cons = maps:put(FromTo, established, StateData#state.connections), + NSD = StateData#state{connections = Cons}, {next_state, stream_established, NSD}; -stream_established({invalid, From, To}, StateData) -> - send_element(StateData, - #xmlel{name = <<"db:result">>, - attrs = [{<<"from">>, To}, - {<<"to">>, From}, - {<<"type">>, <<"invalid">>}]}), - LFrom = jid:nameprep(From), - LTo = jid:nameprep(To), - NSD = StateData#state{ - connections = dict:erase({LFrom, LTo}, - StateData#state.connections)}, +stream_established({invalid, FromTo}, StateData) -> + send_element(StateData, db_result_xml(FromTo, <<"invalid">>)), + Cons = maps:remove(FromTo, StateData#state.connections), + NSD = StateData#state{connections = Cons}, {next_state, stream_established, NSD}; stream_established({xmlstreamend, _Name}, StateData) -> send_text(StateData, ?STREAM_TRAILER), @@ -407,84 +366,77 @@ stream_established(timeout, StateData) -> stream_established(closed, StateData) -> {stop, normal, StateData}. --spec route_incoming_stanza(From :: jid:jid(), - To :: jid:jid(), +handle_routing_result(ok, _El, _StateData) -> + ok; +handle_routing_result({error, Reason}, El, _StateData) -> + ?LOG_WARNING(#{what => s2s_in_route_failed, reason => Reason, element => El}). 
+ +parse_and_route_incoming_stanza(El, StateData) -> + NewEl = jlib:remove_attr(<<"xmlns">>, El), + RemoteJid = jid:from_binary(exml_query:attr(El, <<"from">>, <<>>)), + LocalJid = jid:from_binary(exml_query:attr(El, <<"to">>, <<>>)), + case {RemoteJid, LocalJid, is_valid_stanza(NewEl)} of + {#jid{}, #jid{}, true} -> + route_incoming_stanza(RemoteJid, LocalJid, NewEl, StateData); + _ -> + {error, invalid_stanza} + end. + +-spec route_incoming_stanza(RemoteJid :: jid:jid(), + LocalJid :: jid:jid(), El :: exml:element(), - StateData :: state()) -> - mongoose_acc:t() | error. -route_incoming_stanza(From, To, El, StateData) -> - LFromS = From#jid.lserver, - LToS = To#jid.lserver, - #xmlel{name = Name} = El, + StateData :: state()) -> ok | {error, term()}. +route_incoming_stanza(RemoteJid, LocalJid, El, StateData) -> + LRemoteServer = RemoteJid#jid.lserver, + LLocalServer = LocalJid#jid.lserver, + FromTo = {LLocalServer, LRemoteServer}, Acc = mongoose_acc:new(#{ location => ?LOCATION, - lserver => LToS, + lserver => LLocalServer, element => El, - from_jid => From, - to_jid => To }), - case is_s2s_authenticated(LFromS, LToS, StateData) of + from_jid => RemoteJid, + to_jid => LocalJid }), + case is_s2s_authenticated_or_connected(FromTo, StateData) of true -> - route_stanza(Name, Acc); + route_stanza(Acc); false -> - case is_s2s_connected(LFromS, LToS, StateData) of - true -> - route_stanza(Name, Acc); - false -> - error - end + {error, not_allowed} end. -is_s2s_authenticated(_, _, #state{authenticated = false}) -> +is_s2s_authenticated_or_connected(FromTo, StateData) -> + is_s2s_authenticated(FromTo, StateData) orelse + is_s2s_connected(FromTo, StateData). + +-spec is_s2s_authenticated(ejabberd_s2s:fromto(), #state{}) -> boolean(). 
+is_s2s_authenticated(_FromTo, #state{authenticated = false}) -> false; -is_s2s_authenticated(LFrom, LTo, #state{auth_domain = LFrom}) -> - mongoose_router:is_registered_route(LTo) - orelse mongoose_component:has_component(LTo); -is_s2s_authenticated(_, _, _) -> - false. +is_s2s_authenticated(FromTo, State) -> + same_auth_domain(FromTo, State) andalso is_local_host_known(FromTo). -is_s2s_connected(LFrom, LTo, StateData) -> - case dict:find({LFrom, LTo}, StateData#state.connections) of - {ok, established} -> - true; - _ -> - false - end. +-spec same_auth_domain(ejabberd_s2s:fromto(), #state{}) -> boolean(). +same_auth_domain({_, LRemoteServer}, #state{auth_domain = AuthDomain}) -> + LRemoteServer =:= AuthDomain. --spec route_stanza(binary(), mongoose_acc:t()) -> mongoose_acc:t(). -route_stanza(<<"iq">>, Acc) -> - route_stanza(Acc); -route_stanza(<<"message">>, Acc) -> - route_stanza(Acc); -route_stanza(<<"presence">>, Acc) -> - route_stanza(Acc); -route_stanza(_, _Acc) -> - error. - --spec route_stanza(mongoose_acc:t()) -> mongoose_acc:t(). +-spec is_s2s_connected(ejabberd_s2s:fromto(), #state{}) -> boolean(). +is_s2s_connected(FromTo, StateData) -> + {ok, established} =:= maps:find(FromTo, StateData#state.connections). + +-spec is_valid_stanza(exml:element()) -> boolean(). +is_valid_stanza(#xmlel{name = Name}) -> + is_valid_stanza_name(Name). + +is_valid_stanza_name(<<"iq">>) -> true; +is_valid_stanza_name(<<"message">>) -> true; +is_valid_stanza_name(<<"presence">>) -> true; +is_valid_stanza_name(_) -> false. + +-spec route_stanza(mongoose_acc:t()) -> ok. route_stanza(Acc) -> From = mongoose_acc:from_jid(Acc), To = mongoose_acc:to_jid(Acc), Acc1 = mongoose_hooks:s2s_receive_packet(Acc), - ejabberd_router:route(From, To, Acc1). 
- -%%---------------------------------------------------------------------- -%% Func: StateName/3 -%% Returns: {next_state, NextStateName, NextStateData} | -%% {next_state, NextStateName, NextStateData, Timeout} | -%% {reply, Reply, NextStateName, NextStateData} | -%% {reply, Reply, NextStateName, NextStateData, Timeout} | -%% {stop, Reason, NewStateData} | -%% {stop, Reason, Reply, NewStateData} -%%---------------------------------------------------------------------- -%state_name(Event, From, StateData) -> -% Reply = ok, -% {reply, Reply, state_name, StateData}. + ejabberd_router:route(From, To, Acc1), + ok. -%%---------------------------------------------------------------------- -%% Func: handle_event/3 -%% Returns: {next_state, NextStateName, NextStateData} | -%% {next_state, NextStateName, NextStateData, Timeout} | -%% {stop, Reason, NewStateData} -%%---------------------------------------------------------------------- handle_event(_Event, StateName, StateData) -> {next_state, StateName, StateData}. @@ -503,8 +455,8 @@ handle_sync_event(get_state_infos, _From, StateName, StateData) -> [StateData#state.auth_domain]; false -> Connections = StateData#state.connections, - [D || {{D, _}, established} <- - dict:to_list(Connections)] + [LRemoteServer || {{_, LRemoteServer}, established} <- + maps:to_list(Connections)] end, Infos = [ {direction, in}, @@ -579,11 +531,13 @@ send_element(StateData, El) -> stream_features(HostType, Domain) -> mongoose_hooks:s2s_stream_features(HostType, Domain). --spec change_shaper(state(), jid:lserver(), jid:jid()) -> any(). -change_shaper(StateData, Host, JID) -> - {ok, HostType} = mongoose_domain_api:get_host_type(Host), +-spec change_shaper(state(), ejabberd_s2s:fromto()) -> ok. 
+change_shaper(StateData, {LLocalServer, LRemoteServer}) -> + {ok, HostType} = mongoose_domain_api:get_host_type(LLocalServer), + JID = jid:make(<<>>, LRemoteServer, <<>>), Shaper = acl:match_rule(HostType, StateData#state.shaper, JID), - mongoose_transport:change_shaper(StateData#state.socket, Shaper). + mongoose_transport:change_shaper(StateData#state.socket, Shaper), + ok. -spec new_id() -> binary(). @@ -601,26 +555,37 @@ cancel_timer(Timer) -> ok end. - --spec is_key_packet(exml:element()) -> 'false' | {'key', _, _, _, binary()} - | {'verify', _, _, _, binary()}. -is_key_packet(#xmlel{name = Name, attrs = Attrs, - children = Els}) when Name == <<"db:result">> -> - {key, - xml:get_attr_s(<<"to">>, Attrs), - xml:get_attr_s(<<"from">>, Attrs), - xml:get_attr_s(<<"id">>, Attrs), - xml:get_cdata(Els)}; -is_key_packet(#xmlel{name = Name, attrs = Attrs, - children = Els}) when Name == <<"db:verify">> -> - {verify, - xml:get_attr_s(<<"to">>, Attrs), - xml:get_attr_s(<<"from">>, Attrs), - xml:get_attr_s(<<"id">>, Attrs), - xml:get_cdata(Els)}; -is_key_packet(_) -> +%% XEP-0185: Dialback Key Generation and Validation +%% DB means dial-back +-spec db_verify_xml(ejabberd_s2s:fromto(), binary(), binary()) -> exml:element(). +db_verify_xml({LocalServer, RemoteServer}, Id, Type) -> + #xmlel{name = <<"db:verify">>, + attrs = [{<<"from">>, LocalServer}, + {<<"to">>, RemoteServer}, + {<<"id">>, Id}, + {<<"type">>, Type}]}. + +-spec db_result_xml(ejabberd_s2s:fromto(), binary()) -> exml:element(). +db_result_xml({LocalServer, RemoteServer}, Type) -> + #xmlel{name = <<"db:result">>, + attrs = [{<<"from">>, LocalServer}, + {<<"to">>, RemoteServer}, + {<<"type">>, Type}]}. + +-spec parse_key_packet(exml:element()) -> false + | {db_result | db_verify, FromTo :: ejabberd_s2s:fromto(), Id :: binary(), Key :: binary()}. 
+parse_key_packet(El = #xmlel{name = <<"db:result">>}) -> + parsed_key_packet(db_result, El); +parse_key_packet(El = #xmlel{name = <<"db:verify">>}) -> + parsed_key_packet(db_verify, El); +parse_key_packet(_) -> false. +parsed_key_packet(Type, El) -> + FromTo = parse_from_to(El), + Id = exml_query:attr(El, <<"id">>, <<>>), + Key = exml_query:cdata(El), + {Type, FromTo, Id, Key}. -spec match_domain(binary(), binary()) -> boolean(). match_domain(Domain, Domain) -> @@ -728,3 +693,16 @@ get_tls_xmlel(#state{tls_enabled = false, tls_required = true}) -> [#xmlel{name = <<"starttls">>, attrs = [{<<"xmlns">>, ?NS_TLS}], children = [#xmlel{name = <<"required">>}]}]. + +is_local_host_known({LLocalServer, _}) -> + mongoose_router:is_registered_route(LLocalServer) + orelse mongoose_component:has_component(LLocalServer). + +-spec parse_from_to(exml:element()) -> ejabberd_s2s:fromto(). +parse_from_to(El) -> + RemoteJid = jid:from_binary(exml_query:attr(El, <<"from">>, <<>>)), + LocalJid = jid:from_binary(exml_query:attr(El, <<"to">>, <<>>)), + #jid{luser = <<>>, lresource = <<>>, lserver = LRemoteServer} = RemoteJid, + #jid{luser = <<>>, lresource = <<>>, lserver = LLocalServer} = LocalJid, + %% We use fromto() as seen by ejabberd_s2s_out and ejabberd_s2s + {LLocalServer, LRemoteServer}. diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index 34e903af936..248e75bb955 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -34,7 +34,7 @@ -export([start/2, start_link/2, start_connection/1, - terminate_if_waiting_delay/2, + terminate_if_waiting_delay/1, stop_connection/1]). 
%% p1_fsm callbacks (same as gen_fsm) @@ -75,6 +75,7 @@ authenticated = false :: boolean(), db_enabled = true :: boolean(), try_auth = true :: boolean(), + from_to :: ejabberd_s2s:fromto(), myname, server, queue, host_type :: mongooseim:host_type(), delay_to_retry = undefined_delay, @@ -174,7 +175,7 @@ stop_connection(Pid) -> %% {stop, StopReason} %%---------------------------------------------------------------------- -spec init(list()) -> {'ok', 'open_socket', state()}. -init([{From, Server} = _FromTo, Type]) -> +init([{From, Server} = FromTo, Type]) -> process_flag(trap_exit, true), ?LOG_DEBUG(#{what => s2s_out_started, text => <<"New outgoing s2s connection">>, @@ -202,6 +203,7 @@ init([{From, Server} = _FromTo, Type]) -> tls_required = TLSRequired, tls_options = tls_options(HostType), queue = queue:new(), + from_to = FromTo, myname = From, host_type = HostType, server = Server, @@ -359,7 +361,7 @@ wait_for_validation({xmlstreamelement, El}, StateData) -> {next_state, NextState, StateData, get_timeout_interval(NextState)}; {Pid, _Key, _SID} -> - send_event(Type, Pid, StateData), + send_event_to_s2s_in(Type, Pid, StateData), NextState = wait_for_validation, {next_state, NextState, StateData, get_timeout_interval(NextState)} @@ -549,7 +551,7 @@ stream_established({xmlstreamelement, El}, StateData) -> myname => StateData#state.myname, server => StateData#state.server}), case StateData#state.verify of {VPid, _VKey, _SID} -> - send_event(VType, VPid, StateData); + send_event_to_s2s_in(VType, VPid, StateData); _ -> ok end; @@ -1057,8 +1059,8 @@ get_max_retry_delay(HostType) -> %% @doc Terminate s2s_out connections that are in state wait_before_retry -terminate_if_waiting_delay(From, To) -> - FromTo = {From, To}, +-spec terminate_if_waiting_delay(ejabberd_s2s:fromto()) -> ok. 
+terminate_if_waiting_delay(FromTo) -> Pids = ejabberd_s2s:get_s2s_out_pids(FromTo), lists:foreach( fun(Pid) -> @@ -1102,11 +1104,11 @@ get_predefined_port(HostType, _Addr) -> outgoing_s2s_port(HostType). addr_type(Addr) when tuple_size(Addr) =:= 4 -> inet; addr_type(Addr) when tuple_size(Addr) =:= 8 -> inet6. -send_event(<<"valid">>, Pid, StateData) -> - Event = {valid, StateData#state.server, StateData#state.myname}, +send_event_to_s2s_in(<<"valid">>, Pid, StateData) -> + Event = {valid, StateData#state.from_to}, p1_fsm:send_event(Pid, Event); -send_event(_, Pid, StateData) -> - Event = {invalid, StateData#state.server, StateData#state.myname}, +send_event_to_s2s_in(_, Pid, StateData) -> + Event = {invalid, StateData#state.from_to}, p1_fsm:send_event(Pid, Event). get_acc_with_new_sext(?NS_SASL, Els1, {_SEXT, STLS, STLSReq}) -> diff --git a/src/s2s/mongoose_s2s_backend.erl b/src/s2s/mongoose_s2s_backend.erl index 9f352ec8435..742f00aa927 100644 --- a/src/s2s/mongoose_s2s_backend.erl +++ b/src/s2s/mongoose_s2s_backend.erl @@ -1,7 +1,7 @@ -module(mongoose_s2s_backend). -callback init(map()) -> any(). --callback get_s2s_out_pids(ejabberd_s2s:fromto()) -> [pid()]. +-callback get_s2s_out_pids(ejabberd_s2s:fromto()) -> ejabberd_s2s:s2s_pids(). -callback try_register(Pid :: pid(), ShouldWriteF :: fun(), FromTo :: ejabberd_s2s:fromto()) -> boolean(). From cebed0509dcaec93dcf763bfffbdf21880634cc8 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 6 Jul 2023 18:04:54 +0200 Subject: [PATCH 080/161] Use is_registered instead of new as a record field name in s2s_out --- src/ejabberd_s2s.erl | 2 +- src/ejabberd_s2s_in.erl | 2 ++ src/ejabberd_s2s_out.erl | 57 ++++++++++++++++++++++------------------ 3 files changed, 34 insertions(+), 27 deletions(-) diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index 083f7328e08..db2923733fd 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -67,7 +67,7 @@ %% Pair of hosts {FromServer(), ToServer()}. 
%% Used in a lot of API and backend functions. --type fromto() :: {jid:server(), jid:server()}. +-type fromto() :: {jid:lserver(), jid:lserver()}. %% Pids for ejabberd_s2s_out servers -type s2s_pids() :: [pid()]. diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index be6a698533f..a6f76e91097 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -343,11 +343,13 @@ stream_established({xmlstreamelement, El}, StateData) -> handle_routing_result(Res, El, StateData), {next_state, stream_established, StateData#state{timer = Timer}} end; +%% An event from ejabberd_s2s_out stream_established({valid, FromTo}, StateData) -> send_element(StateData, db_result_xml(FromTo, <<"valid">>)), Cons = maps:put(FromTo, established, StateData#state.connections), NSD = StateData#state{connections = Cons}, {next_state, stream_established, NSD}; +%% An event from ejabberd_s2s_out stream_established({invalid, FromTo}, StateData) -> send_element(StateData, db_result_xml(FromTo, <<"invalid">>)), Cons = maps:remove(FromTo, StateData#state.connections), diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index 248e75bb955..0aace698f48 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -79,7 +79,8 @@ myname, server, queue, host_type :: mongooseim:host_type(), delay_to_retry = undefined_delay, - new = false :: boolean(), + %% is_registered + is_registered = false :: boolean(), verify = false :: false | {pid(), Key :: binary(), SID :: binary()}, timer :: reference() }). @@ -190,7 +191,7 @@ init([{From, Server} = FromTo, Type]) -> {true, true} end, UseV10 = TLS, - {New, Verify} = case Type of + {IsRegistered, Verify} = case Type of new -> {true, false}; {verify, Pid, Key, SID} -> @@ -207,7 +208,7 @@ init([{From, Server} = FromTo, Type]) -> myname = From, host_type = HostType, server = Server, - new = New, + is_registered = IsRegistered, verify = Verify, timer = Timer}}. 
@@ -219,14 +220,14 @@ init([{From, Server} = FromTo, Type]) -> %%---------------------------------------------------------------------- -spec open_socket(_, state()) -> fsm_return(). open_socket(init, StateData = #state{host_type = HostType}) -> - log_s2s_out(StateData#state.new, + log_s2s_out(StateData#state.is_registered, StateData#state.myname, StateData#state.server, StateData#state.tls), ?LOG_DEBUG(#{what => s2s_open_socket, myname => StateData#state.myname, server => StateData#state.server, - new => StateData#state.new, + is_registered => StateData#state.is_registered, verify => StateData#state.verify}), AddrList = get_addr_list(HostType, StateData#state.server), case lists:foldl(fun(_, {ok, Socket}) -> @@ -694,7 +695,7 @@ terminate(Reason, StateName, StateData) -> ?LOG_DEBUG(#{what => s2s_out_closed, text => <<"ejabberd_s2s_out terminated">>, reason => Reason, state_name => StateName, myname => StateData#state.myname, server => StateData#state.server}), - case StateData#state.new of + case StateData#state.is_registered of false -> ok; true -> @@ -814,42 +815,32 @@ bounce_messages(Error) -> -spec send_db_request(state()) -> fsm_return(). 
send_db_request(StateData) -> - Server = StateData#state.server, - New = case StateData#state.new of + IsRegistered = case StateData#state.is_registered of false -> - ejabberd_s2s:try_register( - {StateData#state.myname, Server}); + ejabberd_s2s:try_register(StateData#state.from_to); true -> true end, - NewStateData = StateData#state{new = New}, + NewStateData = StateData#state{is_registered = IsRegistered}, try - case New of + case IsRegistered of false -> + %% Still not registered in the s2s table as an outgoing connection ok; true -> Key1 = ejabberd_s2s:key( StateData#state.host_type, - {StateData#state.myname, Server}, + StateData#state.from_to, StateData#state.remote_streamid), %% Initiating Server Sends Dialback Key %% https://xmpp.org/extensions/xep-0220.html#example-1 - send_element(StateData, - #xmlel{name = <<"db:result">>, - attrs = [{<<"from">>, StateData#state.myname}, - {<<"to">>, Server}], - children = [#xmlcdata{content = Key1}]}) + send_element(StateData, db_result_xml(StateData#state.from_to, Key1)) end, case StateData#state.verify of false -> ok; {_Pid, Key2, SID} -> - send_element(StateData, - #xmlel{name = <<"db:verify">>, - attrs = [{<<"from">>, StateData#state.myname}, - {<<"to">>, StateData#state.server}, - {<<"id">>, SID}], - children = [#xmlcdata{content = Key2}]}) + send_element(StateData, db_verify_xml(StateData#state.from_to, Key2, SID)) end, {next_state, wait_for_validation, NewStateData, ?FSMTIMEOUT*6} catch @@ -879,6 +870,21 @@ is_verify_res(#xmlel{name = Name, is_verify_res(_) -> false. +-spec db_result_xml(ejabberd_s2s:fromto(), binary()) -> exml:element(). +db_result_xml({LocalServer, RemoteServer}, Key) -> + #xmlel{name = <<"db:result">>, + attrs = [{<<"from">>, LocalServer}, + {<<"to">>, RemoteServer}], + children = [#xmlcdata{content = Key}]}. + +-spec db_verify_xml(ejabberd_s2s:fromto(), binary(), binary()) -> exml:element(). 
+db_verify_xml({LocalServer, RemoteServer}, Key, Id) -> + #xmlel{name = <<"db:result">>, + attrs = [{<<"from">>, LocalServer}, + {<<"to">>, RemoteServer}, + {<<"id">>, Id}], + children = [#xmlcdata{content = Key}]}. + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% SRV support @@ -1157,8 +1163,7 @@ handle_parsed_features({false, false, _, StateData = #state{authenticated = true myname => StateData#state.myname, server => StateData#state.server}), {next_state, stream_established, StateData#state{queue = queue:new()}}; -handle_parsed_features({true, _, _, StateData = #state{try_auth = true, new = New}}) when - New /= false -> +handle_parsed_features({true, _, _, StateData = #state{try_auth = true, is_registered = true}}) -> send_element(StateData, #xmlel{name = <<"auth">>, attrs = [{<<"xmlns">>, ?NS_SASL}, From 362bfe677bbaf69c1d6d195f8159a1d79f844e69 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 06:43:45 +0200 Subject: [PATCH 081/161] Add step number comments into s2s in/out --- src/ejabberd_s2s_in.erl | 4 ++++ src/ejabberd_s2s_out.erl | 31 +++++++++++++++++-------------- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index a6f76e91097..6f8e2b8b15a 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -559,6 +559,7 @@ cancel_timer(Timer) -> %% XEP-0185: Dialback Key Generation and Validation %% DB means dial-back +%% Receiving Server is Informed by Authoritative Server that Key is Valid or Invalid (Step 3) -spec db_verify_xml(ejabberd_s2s:fromto(), binary(), binary()) -> exml:element(). db_verify_xml({LocalServer, RemoteServer}, Id, Type) -> #xmlel{name = <<"db:verify">>, @@ -569,6 +570,7 @@ db_verify_xml({LocalServer, RemoteServer}, Id, Type) -> -spec db_result_xml(ejabberd_s2s:fromto(), binary()) -> exml:element(). 
db_result_xml({LocalServer, RemoteServer}, Type) -> + %% Receiving Server Sends Valid or Invalid Verification Result to Initiating Server (Step 4) #xmlel{name = <<"db:result">>, attrs = [{<<"from">>, LocalServer}, {<<"to">>, RemoteServer}, @@ -577,8 +579,10 @@ db_result_xml({LocalServer, RemoteServer}, Type) -> -spec parse_key_packet(exml:element()) -> false | {db_result | db_verify, FromTo :: ejabberd_s2s:fromto(), Id :: binary(), Key :: binary()}. parse_key_packet(El = #xmlel{name = <<"db:result">>}) -> + %% Initiating Server Sends Dialback Key (Step 1) parsed_key_packet(db_result, El); parse_key_packet(El = #xmlel{name = <<"db:verify">>}) -> + %% Receiving Server Sends Verification Request to Authoritative Server (Step 2) parsed_key_packet(db_verify, El); parse_key_packet(_) -> false. diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index 0aace698f48..b8337f4c536 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -330,8 +330,8 @@ wait_for_stream(closed, StateData) -> -spec wait_for_validation(ejabberd:xml_stream_item(), state()) -> fsm_return(). 
wait_for_validation({xmlstreamelement, El}, StateData) -> - case is_verify_res(El) of - {result, To, From, Id, Type} -> + case parse_verify_result(El) of + {db_result, To, From, Id, Type} -> ?LOG_DEBUG(#{what => s2s_receive_result, from => From, to => To, message_id => Id, type => Type}), case {Type, StateData#state.tls_enabled, StateData#state.tls_required} of @@ -352,7 +352,7 @@ wait_for_validation({xmlstreamelement, El}, StateData) -> %% TODO: bounce packets ?CLOSE_GENERIC(wait_for_validation, invalid_dialback_key, El, StateData) end; - {verify, To, From, Id, Type} -> + {db_verify, To, From, Id, Type} -> ?LOG_DEBUG(#{what => s2s_receive_verify, from => From, to => To, message_id => Id, type => Type}), case StateData#state.verify of @@ -545,8 +545,8 @@ wait_before_retry(_Event, StateData) -> stream_established({xmlstreamelement, El}, StateData) -> ?LOG_DEBUG(#{what => s2s_out_stream_established, exml_packet => El, myname => StateData#state.myname, server => StateData#state.server}), - case is_verify_res(El) of - {verify, VTo, VFrom, VId, VType} -> + case parse_verify_result(El) of + {db_verify, VTo, VFrom, VId, VType} -> ?LOG_DEBUG(#{what => s2s_recv_verify, to => VTo, from => VFrom, message_id => VId, type => VType, myname => StateData#state.myname, server => StateData#state.server}), @@ -852,24 +852,26 @@ send_db_request(StateData) -> end. --spec is_verify_res(exml:element()) -> 'false' | {'result', _, _, _, _} | {'verify', _, _, _, _}. -is_verify_res(#xmlel{name = Name, - attrs = Attrs}) when Name == <<"db:result">> -> - {result, +-spec parse_verify_result(exml:element()) -> false + | {db_verify | db_result, To :: binary(), From :: binary(), Id :: binary(), Type :: binary()}. 
+parse_verify_result(#xmlel{name = <<"db:result">>, attrs = Attrs}) -> + %% Receiving Server Sends Valid or Invalid Verification Result to Initiating Server (Step 4) + {db_result, xml:get_attr_s(<<"to">>, Attrs), xml:get_attr_s(<<"from">>, Attrs), xml:get_attr_s(<<"id">>, Attrs), xml:get_attr_s(<<"type">>, Attrs)}; -is_verify_res(#xmlel{name = Name, - attrs = Attrs}) when Name == <<"db:verify">> -> - {verify, +parse_verify_result(#xmlel{name = <<"db:verify">>, attrs = Attrs}) -> + %% Receiving Server is Informed by Authoritative Server that Key is Valid or Invalid (Step 3) + {db_verify, xml:get_attr_s(<<"to">>, Attrs), xml:get_attr_s(<<"from">>, Attrs), xml:get_attr_s(<<"id">>, Attrs), xml:get_attr_s(<<"type">>, Attrs)}; -is_verify_res(_) -> +parse_verify_result(_) -> false. +%% Initiating Server Sends Dialback Key (Step 1) -spec db_result_xml(ejabberd_s2s:fromto(), binary()) -> exml:element(). db_result_xml({LocalServer, RemoteServer}, Key) -> #xmlel{name = <<"db:result">>, @@ -877,9 +879,10 @@ db_result_xml({LocalServer, RemoteServer}, Key) -> {<<"to">>, RemoteServer}], children = [#xmlcdata{content = Key}]}. +%% Receiving Server Sends Verification Request to Authoritative Server (Step 2) -spec db_verify_xml(ejabberd_s2s:fromto(), binary(), binary()) -> exml:element(). 
db_verify_xml({LocalServer, RemoteServer}, Key, Id) -> - #xmlel{name = <<"db:result">>, + #xmlel{name = <<"db:verify">>, attrs = [{<<"from">>, LocalServer}, {<<"to">>, RemoteServer}, {<<"id">>, Id}], From 519e76ec6114370f0c9de70ed888fdf86272f23a Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 06:52:54 +0200 Subject: [PATCH 082/161] Replace replace_from_to_attrs with replace_from_to --- src/ejabberd_s2s.erl | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index db2923733fd..c99e68c05a3 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -128,7 +128,7 @@ key(HostType, {From, To}, StreamID) -> %%==================================================================== init([]) -> - db_init(), + internal_database_init(), set_shared_secret(), ejabberd_commands:register_commands(commands()), gen_hook:add_handlers(hooks()), @@ -173,11 +173,7 @@ do_route(From, To, Acc, Packet) -> ?LOG_DEBUG(#{what => s2s_found_connection, text => <<"Send packet to s2s connection">>, s2s_pid => Pid, acc => Acc}), - #xmlel{attrs = Attrs} = Packet, - NewAttrs = jlib:replace_from_to_attrs(jid:to_binary(From), - jid:to_binary(To), - Attrs), - NewPacket = Packet#xmlel{attrs = NewAttrs}, + NewPacket = jlib:replace_from_to(From, To, Packet), Acc1 = mongoose_hooks:s2s_send_packet(Acc, From, To, Packet), send_element(Pid, Acc1, NewPacket), {done, Acc1}; @@ -478,7 +474,7 @@ lookup_certfile(HostType) -> %% Backend logic below: -db_init() -> +internal_database_init() -> Backend = mongoose_config:get_opt(s2s_backend), mongoose_s2s_backend:init(#{backend => Backend}). 
From 923611d67b3ffa900d3db4e3ed49060426d9f4e4 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 07:25:34 +0200 Subject: [PATCH 083/161] Get rid of source from s2s_secret table --- src/ejabberd_s2s.erl | 60 +++++++++++++++++++------------- src/s2s/mongoose_s2s_backend.erl | 12 +++---- src/s2s/mongoose_s2s_cets.erl | 10 +++--- src/s2s/mongoose_s2s_mnesia.erl | 12 +++---- 4 files changed, 51 insertions(+), 43 deletions(-) diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index c99e68c05a3..3a597d0bbeb 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -65,7 +65,9 @@ -define(DEFAULT_MAX_S2S_CONNECTIONS, 1). -define(DEFAULT_MAX_S2S_CONNECTIONS_PER_NODE, 1). -%% Pair of hosts {FromServer(), ToServer()}. +%% Pair of hosts {FromServer, ToServer}. +%% FromServer is the local server. +%% ToServer is the remote server. %% Used in a lot of API and backend functions. -type fromto() :: {jid:lserver(), jid:lserver()}. @@ -74,10 +76,9 @@ -record(state, {}). --type secret_source() :: config | random. -type base16_secret() :: binary(). --export_type([fromto/0, s2s_pids/0, secret_source/0, base16_secret/0]). +-export_type([fromto/0, s2s_pids/0, base16_secret/0]). %%==================================================================== %% API @@ -118,7 +119,7 @@ node_cleanup(Acc, #{node := Node}, _) -> -spec key(mongooseim:host_type(), fromto(), binary()) -> binary(). key(HostType, {From, To}, StreamID) -> - {ok, {_, Secret}} = get_shared_secret(HostType), + {ok, Secret} = get_shared_secret(HostType), SecretHashed = base16:encode(crypto:hash(sha256, Secret)), HMac = crypto:mac(hmac, sha256, SecretHashed, [From, " ", To, " ", StreamID]), base16:encode(HMac). @@ -440,27 +441,36 @@ set_shared_secret() -> ok. 
set_shared_secret(HostType) -> - {Source, Secret} = get_shared_secret_from_config_or_make_new(HostType), - case get_shared_secret(HostType) of - {error, not_found} -> - %% Write secret for the first time - register_secret(HostType, Source, Secret); - {ok, {_, OldSecret}} when OldSecret =:= Secret -> + %% register_secret is replicated across all nodes. + %% So, when starting a node with updated secret in the config, + %% we would replace stored secret on all nodes at once. + %% There could be a small race condition when dialback key checks would get rejected, + %% But there would not be conflicts when some nodes have one secret stored and others - another. + case {get_shared_secret(HostType), get_shared_secret_from_config(HostType)} of + {{error, not_found}, {ok, Secret}} -> + %% Write the secret from the config into Mnesia/CETS for the first time + register_secret(HostType, Secret); + {{error, not_found}, {error, not_found}} -> + %% Write a random secret into Mnesia/CETS for the first time + register_secret(HostType, make_random_secret()); + {{ok, Secret}, {ok, Secret}} -> + %% Config matches Mnesia/CETS skip_same; - {ok, _} when Source =:= config -> + {{ok, _OldSecret}, {ok, NewSecret}} -> ?LOG_INFO(#{what => overwrite_secret_from_config}), - register_secret(HostType, Source, Secret); - {ok, _} -> - ok + register_secret(HostType, NewSecret); + {{ok, _OldSecret}, {error, not_found}} -> + %% Keep the secret already stored in Mnesia/CETS + keep_existing end. -get_shared_secret_from_config_or_make_new(HostType) -> - case mongoose_config:lookup_opt([{s2s, HostType}, shared]) of - {ok, SecretFromConfig} -> - {config, SecretFromConfig}; - {error, not_found} -> - {random, base16:encode(crypto:strong_rand_bytes(10))} - end. +-spec get_shared_secret_from_config(mongooseim:host_type()) -> {ok, base16_secret()} | {error, not_found}. +get_shared_secret_from_config(HostType) -> + mongoose_config:lookup_opt([{s2s, HostType}, shared]). 
+ +-spec make_random_secret() -> base16_secret(). +make_random_secret() -> + base16:encode(crypto:strong_rand_bytes(10)). -spec lookup_certfile(mongooseim:host_type()) -> {ok, string()} | {error, not_found}. lookup_certfile(HostType) -> @@ -495,10 +505,10 @@ call_node_cleanup(Node) -> remove_connection(FromTo, Pid) -> mongoose_s2s_backend:remove_connection(FromTo, Pid). --spec get_shared_secret(mongooseim:host_type()) -> {ok, {secret_source(), base16_secret()}} | {error, not_found}. +-spec get_shared_secret(mongooseim:host_type()) -> {ok, base16_secret()} | {error, not_found}. get_shared_secret(HostType) -> mongoose_s2s_backend:get_shared_secret(HostType). --spec register_secret(mongooseim:host_type(), ejabberd_s2s:secret_source(), ejabberd_s2s:base16_secret()) -> ok. -register_secret(HostType, Source, Secret) -> - mongoose_s2s_backend:register_secret(HostType, Source, Secret). +-spec register_secret(mongooseim:host_type(), ejabberd_s2s:base16_secret()) -> ok. +register_secret(HostType, Secret) -> + mongoose_s2s_backend:register_secret(HostType, Secret). diff --git a/src/s2s/mongoose_s2s_backend.erl b/src/s2s/mongoose_s2s_backend.erl index 742f00aa927..7c61cd53f36 100644 --- a/src/s2s/mongoose_s2s_backend.erl +++ b/src/s2s/mongoose_s2s_backend.erl @@ -8,10 +8,9 @@ -callback remove_connection(FromTo :: ejabberd_s2s:fromto(), Pid :: pid()) -> ok. -callback node_cleanup(Node :: node()) -> term(). -callback register_secret(HostType :: mongooseim:host_type(), - Source :: ejabberd_s2s:secret_source(), Secret :: ejabberd_s2s:base16_secret()) -> ok. -callback get_shared_secret(mongooseim:host_type()) -> - {ok, {ejabberd_s2s:secret_source(), ejabberd_s2s:base16_secret()}} | {error, not_found}. + {ok, ejabberd_s2s:base16_secret()} | {error, not_found}. -export([init/1, get_s2s_out_pids/1, @@ -19,7 +18,7 @@ remove_connection/2, node_cleanup/1]). --export([register_secret/3, +-export([register_secret/2, get_shared_secret/1]). -ignore_xref([behaviour_info/1]). 
@@ -52,12 +51,11 @@ node_cleanup(Node) -> mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [Node]). -spec register_secret(HostType :: mongooseim:host_type(), - Source :: ejabberd_s2s:secret_source(), Secret :: ejabberd_s2s:base16_secret()) -> ok. -register_secret(HostType, Source, Secret) -> - mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [HostType, Source, Secret]). +register_secret(HostType, Secret) -> + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [HostType, Secret]). -spec get_shared_secret(mongooseim:host_type()) -> - {ok, {ejabberd_s2s:secret_source(), ejabberd_s2s:base16_secret()}} | {error, not_found}. + {ok, ejabberd_s2s:base16_secret()} | {error, not_found}. get_shared_secret(HostType) -> mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [HostType]). diff --git a/src/s2s/mongoose_s2s_cets.erl b/src/s2s/mongoose_s2s_cets.erl index c65c9359706..0beff898b06 100644 --- a/src/s2s/mongoose_s2s_cets.erl +++ b/src/s2s/mongoose_s2s_cets.erl @@ -7,7 +7,7 @@ remove_connection/2, node_cleanup/1]). --export([register_secret/3, +-export([register_secret/2, get_shared_secret/1]). -include("mongoose_logger.hrl"). @@ -48,14 +48,14 @@ node_cleanup(Node) -> ets:select_delete(?TABLE, [{R, [Guard], [true]}]). %% Secrets -register_secret(HostType, Source, Secret) -> - cets:insert(?SECRET_TABLE, {HostType, Source, Secret}), +register_secret(HostType, Secret) -> + cets:insert(?SECRET_TABLE, {HostType, Secret}), ok. get_shared_secret(HostType) -> case ets:lookup(?SECRET_TABLE, HostType) of - [{_HostType, Source, Secret}] -> - {ok, {Source, Secret}}; + [{_HostType, Secret}] -> + {ok, Secret}; [] -> {error, not_found} end. diff --git a/src/s2s/mongoose_s2s_mnesia.erl b/src/s2s/mongoose_s2s_mnesia.erl index 1fdf5f09340..582c473a67d 100644 --- a/src/s2s/mongoose_s2s_mnesia.erl +++ b/src/s2s/mongoose_s2s_mnesia.erl @@ -7,7 +7,7 @@ remove_connection/2, node_cleanup/1]). 
--export([register_secret/3, +-export([register_secret/2, get_shared_secret/1]). -record(s2s, { @@ -15,7 +15,7 @@ pid :: pid() | '$1' }). --record(s2s_secret, {host_type, source, secret}). +-record(s2s_secret, {host_type, secret}). -include("mongoose_logger.hrl"). @@ -90,15 +90,15 @@ init_secrets() -> mnesia:create_table(s2s_secret, Opts), mnesia:add_table_copy(s2s_secret, node(), ram_copies). -register_secret(HostType, Source, Secret) -> - Rec = #s2s_secret{host_type = HostType, source = Source, secret = Secret}, +register_secret(HostType, Secret) -> + Rec = #s2s_secret{host_type = HostType, secret = Secret}, {atomic, _} = mnesia:transaction(fun() -> mnesia:write(Rec) end), ok. get_shared_secret(HostType) -> case mnesia:dirty_read(s2s_secret, HostType) of - [#s2s_secret{source = Source, secret = Secret}] -> - {ok, {Source, Secret}}; + [#s2s_secret{secret = Secret}] -> + {ok, Secret}; [] -> {error, not_found} end. From db076f42712264d3e87f97ceed4fc9e8caaa1f5e Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 07:38:33 +0200 Subject: [PATCH 084/161] Remove incoming_s2s_number/0, outgoing_s2s_number/0 from ignore_xref --- src/ejabberd_s2s.erl | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index 3a597d0bbeb..551d2e188fa 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -55,8 +55,7 @@ %% ejabberd API -export([get_info_s2s_connections/1]). --ignore_xref([get_info_s2s_connections/1, - incoming_s2s_number/0, outgoing_s2s_number/0, start_link/0]). +-ignore_xref([get_info_s2s_connections/1, start_link/0]). -include("mongoose.hrl"). -include("jlib.hrl"). 
@@ -100,13 +99,19 @@ route(From, To, Acc, Packet) -> try_register(FromTo) -> ShouldWriteF = should_write_f(FromTo), Pid = self(), - case call_try_register(Pid, ShouldWriteF, FromTo) of - true -> - true; + IsRegistered = call_try_register(Pid, ShouldWriteF, FromTo), + case IsRegistered of false -> - ?LOG_ERROR(#{what => s2s_register_failed, from_to => FromTo}), - false - end. + %% This usually happens when a ejabberd_s2s_out connection is established during dialback + %% procedure to check the key. + %% We still are fine, we just would not use that s2s connection to route + %% any stanzas to the remote server. + %% Could be a sign of abuse or a bug though, so use logging here. + ?LOG_INFO(#{what => s2s_register_failed, from_to => FromTo, pid => self()}); + _ -> + ok + end, + IsRegistered. %%==================================================================== %% Hooks callbacks @@ -250,7 +255,6 @@ open_several_connections(N, FromTo) -> -spec new_connection(FromTo :: fromto(), ShouldWriteF :: fun()) -> ok. new_connection(FromTo, ShouldWriteF) -> - %% Serialize opening of connections {ok, Pid} = ejabberd_s2s_out:start(FromTo, new), case call_try_register(Pid, ShouldWriteF, FromTo) of true -> From fed53e68f5495be061087f7eccc6842f1cef8cf6 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 07:44:26 +0200 Subject: [PATCH 085/161] Move get_info_s2s_connections into mongoose_s2s_info --- big_tests/tests/s2s_SUITE.erl | 2 +- src/ejabberd_s2s.erl | 43 +------------------------------- src/s2s/mongoose_s2s_info.erl | 46 +++++++++++++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 43 deletions(-) create mode 100644 src/s2s/mongoose_s2s_info.erl diff --git a/big_tests/tests/s2s_SUITE.erl b/big_tests/tests/s2s_SUITE.erl index 08911f0a01c..04239f3b026 100644 --- a/big_tests/tests/s2s_SUITE.erl +++ b/big_tests/tests/s2s_SUITE.erl @@ -160,7 +160,7 @@ connections_info(Config) -> ok. 
get_s2s_connections(RPCSpec, Domain, Type)-> - AllS2SConnections = ?dh:rpc(RPCSpec, ejabberd_s2s, get_info_s2s_connections, [Type]), + AllS2SConnections = ?dh:rpc(RPCSpec, mongoose_s2s_info, get_info_s2s_connections, [Type]), % ct:pal("Node = ~p, ConnectionType = ~p~nAllS2SConnections(~p): ~p", % [maps:get(node, RPCSpec), Type, length(AllS2SConnections), AllS2SConnections]), DomainS2SConnections = diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index 551d2e188fa..c4c9b1cb956 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -52,10 +52,7 @@ -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). -%% ejabberd API --export([get_info_s2s_connections/1]). - --ignore_xref([get_info_s2s_connections/1, start_link/0]). +-ignore_xref([start_link/0]). -include("mongoose.hrl"). -include("jlib.hrl"). @@ -401,44 +398,6 @@ allow_host({FromServer, ToServer}) -> end end. -%% @doc Get information about S2S connections of the specified type. --spec get_info_s2s_connections('in' | 'out') -> [[{atom(), any()}, ...]]. -get_info_s2s_connections(Type) -> - ChildType = case Type of - in -> ejabberd_s2s_in_sup; - out -> ejabberd_s2s_out_sup - end, - Connections = supervisor:which_children(ChildType), - get_s2s_info(Connections, Type). - --type connstate() :: 'restarting' | 'undefined' | pid(). --type conn() :: { any(), connstate(), 'supervisor' | 'worker', 'dynamic' | [_] }. --spec get_s2s_info(Connections :: [conn()], - Type :: 'in' | 'out' - ) -> [[{any(), any()}, ...]]. % list of lists -get_s2s_info(Connections, Type)-> - complete_s2s_info(Connections, Type, []). - --spec complete_s2s_info(Connections :: [conn()], - Type :: 'in' | 'out', - Result :: [[{any(), any()}, ...]] % list of lists - ) -> [[{any(), any()}, ...]]. 
% list of lists -complete_s2s_info([], _, Result)-> - Result; -complete_s2s_info([Connection|T], Type, Result)-> - {_, PID, _, _} = Connection, - State = get_s2s_state(PID), - complete_s2s_info(T, Type, [State|Result]). - --spec get_s2s_state(connstate()) -> [{atom(), any()}, ...]. -get_s2s_state(S2sPid) -> - Infos = case gen_fsm_compat:sync_send_all_state_event(S2sPid, get_state_infos) of - {state_infos, Is} -> [{status, open} | Is]; - {noproc, _} -> [{status, closed}]; %% Connection closed - {badrpc, _} -> [{status, error}] - end, - [{s2s_pid, S2sPid} | Infos]. - -spec set_shared_secret() -> ok. set_shared_secret() -> [set_shared_secret(HostType) || HostType <- ?ALL_HOST_TYPES], diff --git a/src/s2s/mongoose_s2s_info.erl b/src/s2s/mongoose_s2s_info.erl new file mode 100644 index 00000000000..5fb5bfa377e --- /dev/null +++ b/src/s2s/mongoose_s2s_info.erl @@ -0,0 +1,46 @@ +%% Some ugly code only used in tests. +%% It was originally in ejabberd_s2s, but it was moved out to improve readability. +-module(mongoose_s2s_info). + +%% ejabberd API +-export([get_info_s2s_connections/1]). +-ignore_xref([get_info_s2s_connections/1]). + +-type connstate() :: 'restarting' | 'undefined' | pid(). +-type conn() :: { any(), connstate(), 'supervisor' | 'worker', 'dynamic' | [_] }. + +%% @doc Get information about S2S connections of the specified type. +-spec get_info_s2s_connections('in' | 'out') -> [[{atom(), any()}, ...]]. +get_info_s2s_connections(Type) -> + ChildType = case Type of + in -> ejabberd_s2s_in_sup; + out -> ejabberd_s2s_out_sup + end, + Connections = supervisor:which_children(ChildType), + get_s2s_info(Connections, Type). + +-spec get_s2s_info(Connections :: [conn()], + Type :: 'in' | 'out' + ) -> [[{any(), any()}, ...]]. % list of lists +get_s2s_info(Connections, Type)-> + complete_s2s_info(Connections, Type, []). 
+ +-spec complete_s2s_info(Connections :: [conn()], + Type :: 'in' | 'out', + Result :: [[{any(), any()}, ...]] % list of lists + ) -> [[{any(), any()}, ...]]. % list of lists +complete_s2s_info([], _, Result)-> + Result; +complete_s2s_info([Connection|T], Type, Result)-> + {_, PID, _, _} = Connection, + State = get_s2s_state(PID), + complete_s2s_info(T, Type, [State|Result]). + +-spec get_s2s_state(connstate()) -> [{atom(), any()}, ...]. +get_s2s_state(S2sPid) -> + Infos = case gen_fsm_compat:sync_send_all_state_event(S2sPid, get_state_infos) of + {state_infos, Is} -> [{status, open} | Is]; + {noproc, _} -> [{status, closed}]; %% Connection closed + {badrpc, _} -> [{status, error}] + end, + [{s2s_pid, S2sPid} | Infos]. From 76c3942a1c715932cccc638f566bf55d19cdd647 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 07:50:07 +0200 Subject: [PATCH 086/161] Remove unneeded formatting from ejabberd_s2s --- src/ejabberd_s2s.erl | 33 ++++++++++++--------------------- 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index c4c9b1cb956..ee80634c5f9 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -110,6 +110,13 @@ try_register(FromTo) -> end, IsRegistered. +-spec key(mongooseim:host_type(), fromto(), binary()) -> binary(). +key(HostType, {From, To}, StreamID) -> + {ok, Secret} = get_shared_secret(HostType), + SecretHashed = base16:encode(crypto:hash(sha256, Secret)), + HMac = crypto:mac(hmac, sha256, SecretHashed, [From, " ", To, " ", StreamID]), + base16:encode(HMac). + %%==================================================================== %% Hooks callbacks %%==================================================================== @@ -119,13 +126,6 @@ node_cleanup(Acc, #{node := Node}, _) -> Res = call_node_cleanup(Node), {ok, maps:put(?MODULE, Res, Acc)}. --spec key(mongooseim:host_type(), fromto(), binary()) -> binary(). 
-key(HostType, {From, To}, StreamID) -> - {ok, Secret} = get_shared_secret(HostType), - SecretHashed = base16:encode(crypto:hash(sha256, Secret)), - HMac = crypto:mac(hmac, sha256, SecretHashed, [From, " ", To, " ", StreamID]), - base16:encode(HMac). - %%==================================================================== %% gen_server callbacks %%==================================================================== @@ -313,10 +313,7 @@ should_write_f(FromTo) -> needed_extra_connections_number(FromTo, Connections) > 0 end. -%%-------------------------------------------------------------------- -%% Description: Return true if the destination must be considered as a -%% service. -%% -------------------------------------------------------------------- +%% Returns true if the destination must be considered as a service. -spec is_service(fromto()) -> boolean(). is_service({FromServer, ToServer} = _FromTo) -> case mongoose_config:lookup_opt({route_subdomains, FromServer}) of @@ -328,11 +325,10 @@ is_service({FromServer, ToServer} = _FromTo) -> lists:any(P, parent_domains(ToServer)) end. --spec parent_domains(binary()) -> [binary(), ...]. +-spec parent_domains(jid:lserver()) -> [jid:lserver()]. parent_domains(Domain) -> parent_domains(Domain, [Domain]). --spec parent_domains(binary(), [binary(), ...]) -> [binary(), ...]. parent_domains(<<>>, Acc) -> lists:reverse(Acc); parent_domains(<<$., Rest/binary>>, Acc) -> @@ -347,10 +343,8 @@ send_element(Pid, Acc, El) -> timeout() -> 600000. -%%-------------------------------------------------------------------- -%% Function: domain_utf8_to_ascii(Domain) -> binary() | false -%% Description: Converts a UTF-8 domain to ASCII (IDNA) -%% -------------------------------------------------------------------- + +%% Converts a UTF-8 domain to ASCII (IDNA) -spec domain_utf8_to_ascii(binary() | string()) -> binary() | false. 
domain_utf8_to_ascii(Domain) -> case catch idna:utf8_to_ascii(Domain) of @@ -360,10 +354,7 @@ domain_utf8_to_ascii(Domain) -> list_to_binary(AsciiDomain) end. -%%%---------------------------------------------------------------------- -%%% ejabberd commands - --spec commands() -> [ejabberd_commands:cmd(), ...]. +-spec commands() -> [ejabberd_commands:cmd()]. commands() -> [ #ejabberd_commands{name = incoming_s2s_number, From babc56f8bdf57142d06fbdc37da46db659efc414 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 09:10:37 +0200 Subject: [PATCH 087/161] Move library functions into mongoose_s2s_lib Remove ShouldWriteF --- src/cert_utils.erl | 2 +- src/ejabberd_s2s.erl | 270 ++++++------------------------- src/ejabberd_s2s_in.erl | 10 +- src/ejabberd_s2s_out.erl | 15 +- src/s2s/mongoose_s2s_backend.erl | 8 +- src/s2s/mongoose_s2s_cets.erl | 8 +- src/s2s/mongoose_s2s_lib.erl | 217 +++++++++++++++++++++++++ src/s2s/mongoose_s2s_mnesia.erl | 8 +- 8 files changed, 285 insertions(+), 253 deletions(-) create mode 100644 src/s2s/mongoose_s2s_lib.erl diff --git a/src/cert_utils.erl b/src/cert_utils.erl index 4f9eed4bd5a..bc9724ba088 100644 --- a/src/cert_utils.erl +++ b/src/cert_utils.erl @@ -97,7 +97,7 @@ get_lserver_from_addr(V, UTF8) when is_binary(V); is_list(V) -> Val = convert_to_bin(V), case {jid:from_binary(Val), UTF8} of {#jid{luser = <<"">>, lserver = LD, lresource = <<"">>}, true} -> - case ejabberd_s2s:domain_utf8_to_ascii(LD) of + case mongoose_s2s_lib:domain_utf8_to_ascii(LD) of false -> []; PCLD -> [PCLD] end; diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index ee80634c5f9..0afe4ab4794 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -38,12 +38,7 @@ key/3, get_s2s_out_pids/1, try_register/1, - remove_connection/2, - allow_host/1, - domain_utf8_to_ascii/1, - timeout/0, - lookup_certfile/1 - ]). + remove_connection/2]). %% Hooks callbacks -export([node_cleanup/3]). @@ -56,10 +51,6 @@ -include("mongoose.hrl"). 
-include("jlib.hrl"). --include("ejabberd_commands.hrl"). - --define(DEFAULT_MAX_S2S_CONNECTIONS, 1). --define(DEFAULT_MAX_S2S_CONNECTIONS_PER_NODE, 1). %% Pair of hosts {FromServer, ToServer}. %% FromServer is the local server. @@ -79,9 +70,8 @@ %%==================================================================== %% API %%==================================================================== -%%-------------------------------------------------------------------- -%% Description: Starts the server -%%-------------------------------------------------------------------- + +%% Starts the server -spec start_link() -> ignore | {error, _} | {ok, pid()}. start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). @@ -92,11 +82,11 @@ filter(From, To, Acc, Packet) -> route(From, To, Acc, Packet) -> do_route(From, To, Acc, Packet). --spec try_register(fromto()) -> boolean(). +%% Called by ejabberd_s2s_out process. +-spec try_register(fromto()) -> IsRegistered :: boolean(). try_register(FromTo) -> - ShouldWriteF = should_write_f(FromTo), Pid = self(), - IsRegistered = call_try_register(Pid, ShouldWriteF, FromTo), + IsRegistered = call_try_register(Pid, FromTo), case IsRegistered of false -> %% This usually happens when a ejabberd_s2s_out connection is established during dialback @@ -133,7 +123,7 @@ node_cleanup(Acc, #{node := Node}, _) -> init([]) -> internal_database_init(), set_shared_secret(), - ejabberd_commands:register_commands(commands()), + ejabberd_commands:register_commands(mongoose_s2s_lib:commands()), gen_hook:add_handlers(hooks()), {ok, #state{}}. @@ -151,7 +141,7 @@ handle_info(Msg, State) -> terminate(_Reason, _State) -> gen_hook:delete_handlers(hooks()), - ejabberd_commands:unregister_commands(commands()), + ejabberd_commands:unregister_commands(mongoose_s2s_lib:commands()), ok. code_change(_OldVsn, State, _Extra) -> @@ -195,199 +185,63 @@ do_route(From, To, Acc, Packet) -> end end. 
--spec make_from_to(From :: jid:jid(), To :: jid:jid()) -> fromto(). -make_from_to(#jid{lserver = FromServer}, #jid{lserver = ToServer}) -> - {FromServer, ToServer}. +-spec send_element(pid(), mongoose_acc:t(), exml:element()) -> ok. +send_element(Pid, Acc, El) -> + Pid ! {send_element, Acc, El}, + ok. -spec find_connection(From :: jid:jid(), To :: jid:jid()) -> {ok, pid()} | {error, not_allowed}. find_connection(From, To) -> - FromTo = make_from_to(From, To), + FromTo = mongoose_s2s_lib:make_from_to(From, To), ?LOG_DEBUG(#{what => s2s_find_connection, from_to => FromTo}), OldCons = get_s2s_out_pids(FromTo), NewCons = ensure_enough_connections(FromTo, OldCons), case NewCons of [] -> {error, not_allowed}; - _ -> - {ok, choose_pid(From, NewCons)} + [_|_] -> + {ok, mongoose_s2s_lib:choose_pid(From, NewCons)} end. %% Opens more connections if needed and allowed. +%% Returns an updated list of connections. +-spec ensure_enough_connections(fromto(), s2s_pids()) -> s2s_pids(). ensure_enough_connections(FromTo, OldCons) -> - NeededConnections = needed_connections_number_if_allowed(FromTo, OldCons), + NeededConnections = mongoose_s2s_lib:needed_extra_connections_number_if_allowed(FromTo, OldCons), case NeededConnections of 0 -> OldCons; _ -> - open_several_connections(NeededConnections, FromTo), + open_new_connections(NeededConnections, FromTo), %% Query for s2s pids one more time get_s2s_out_pids(FromTo) end. -%% Prefers the local connection (i.e. not on the remote node) --spec choose_pid(From :: jid:jid(), Pids :: s2s_pids()) -> pid(). -choose_pid(From, [_|_] = Pids) -> - Pids1 = case filter_local_pids(Pids) of - [] -> Pids; - FilteredPids -> FilteredPids - end, - % Use sticky connections based on the JID of the sender - % (without the resource to ensure that a muc room always uses the same connection) - Pid = lists:nth(erlang:phash2(jid:to_bare(From), length(Pids1)) + 1, Pids1), - ?LOG_DEBUG(#{what => s2s_choose_pid, from => From, s2s_pid => Pid}), - Pid. 
- -%% Returns only pids from the current node. --spec filter_local_pids(s2s_pids()) -> s2s_pids(). -filter_local_pids(Pids) -> - Node = node(), - [Pid || Pid <- Pids, node(Pid) == Node]. - --spec open_several_connections(N :: pos_integer(), FromTo :: fromto()) -> ok. -open_several_connections(N, FromTo) -> - ShouldWriteF = should_write_f(FromTo), - [new_connection(FromTo, ShouldWriteF) || _N <- lists:seq(1, N)], +-spec open_new_connections(N :: pos_integer(), FromTo :: fromto()) -> ok. +open_new_connections(N, FromTo) -> + [open_new_connection(FromTo) || _N <- lists:seq(1, N)], ok. --spec new_connection(FromTo :: fromto(), ShouldWriteF :: fun()) -> ok. -new_connection(FromTo, ShouldWriteF) -> +-spec open_new_connection(FromTo :: fromto()) -> ok. +open_new_connection(FromTo) -> + %% Start a process, but do not connect to the server yet. {ok, Pid} = ejabberd_s2s_out:start(FromTo, new), - case call_try_register(Pid, ShouldWriteF, FromTo) of - true -> - ?LOG_INFO(#{what => s2s_new_connection, - text => <<"New s2s connection started">>, - from_to => FromTo, s2s_pid => Pid}), - ejabberd_s2s_out:start_connection(Pid); - false -> - ejabberd_s2s_out:stop_connection(Pid) - end, + %% Try to write the Pid into Mnesia/CETS + IsRegistered = call_try_register(Pid, FromTo), + %% If successful, create an actual network connection + %% If not successful, remove the process + maybe_start_connection(Pid, FromTo, IsRegistered), ok. --spec max_s2s_connections(fromto()) -> pos_integer(). -max_s2s_connections(FromTo) -> - match_integer_acl_rule(FromTo, max_s2s_connections, - ?DEFAULT_MAX_S2S_CONNECTIONS). - --spec max_s2s_connections_per_node(fromto()) -> pos_integer(). -max_s2s_connections_per_node(FromTo) -> - match_integer_acl_rule(FromTo, max_s2s_connections_per_node, - ?DEFAULT_MAX_S2S_CONNECTIONS_PER_NODE). - --spec match_integer_acl_rule(fromto(), atom(), integer()) -> term(). 
-match_integer_acl_rule({FromServer, ToServer}, Rule, Default) -> - {ok, HostType} = mongoose_domain_api:get_host_type(FromServer), - ToServerJid = jid:make(<<>>, ToServer, <<>>), - case acl:match_rule(HostType, Rule, ToServerJid) of - Int when is_integer(Int) -> Int; - _ -> Default - end. - -needed_connections_number_if_allowed(FromTo, OldCons) -> - case is_s2s_allowed_for_host(FromTo, OldCons) of - true -> - needed_extra_connections_number(FromTo, OldCons); - false -> - 0 - end. - -%% Checks: -%% - if the host is not a service -%% - and if the s2s host is not blacklisted or is in whitelist --spec is_s2s_allowed_for_host(fromto(), _OldConnections :: s2s_pids()) -> boolean(). -is_s2s_allowed_for_host(_FromTo, [_|_]) -> - true; %% Has outgoing connections established, skip the check -is_s2s_allowed_for_host(FromTo, []) -> - not is_service(FromTo) andalso allow_host(FromTo). - --spec needed_extra_connections_number(fromto(), s2s_pids()) -> non_neg_integer(). -needed_extra_connections_number(FromTo, Connections) -> - MaxConnections = max_s2s_connections(FromTo), - MaxConnectionsPerNode = max_s2s_connections_per_node(FromTo), - LocalPids = filter_local_pids(Connections), - lists:min([MaxConnections - length(Connections), - MaxConnectionsPerNode - length(LocalPids)]). - -should_write_f(FromTo) -> - fun(Connections) when is_list(Connections) -> - needed_extra_connections_number(FromTo, Connections) > 0 - end. - -%% Returns true if the destination must be considered as a service. --spec is_service(fromto()) -> boolean(). -is_service({FromServer, ToServer} = _FromTo) -> - case mongoose_config:lookup_opt({route_subdomains, FromServer}) of - {ok, s2s} -> % bypass RFC 3920 10.3 - false; - {error, not_found} -> - Hosts = ?MYHOSTS, - P = fun(ParentDomain) -> lists:member(ParentDomain, Hosts) end, - lists:any(P, parent_domains(ToServer)) - end. - --spec parent_domains(jid:lserver()) -> [jid:lserver()]. -parent_domains(Domain) -> - parent_domains(Domain, [Domain]). 
- -parent_domains(<<>>, Acc) -> - lists:reverse(Acc); -parent_domains(<<$., Rest/binary>>, Acc) -> - parent_domains(Rest, [Rest | Acc]); -parent_domains(<<_, Rest/binary>>, Acc) -> - parent_domains(Rest, Acc). - --spec send_element(pid(), mongoose_acc:t(), exml:element()) -> - {'send_element', mongoose_acc:t(), exml:element()}. -send_element(Pid, Acc, El) -> - Pid ! {send_element, Acc, El}. - -timeout() -> - 600000. - -%% Converts a UTF-8 domain to ASCII (IDNA) --spec domain_utf8_to_ascii(binary() | string()) -> binary() | false. -domain_utf8_to_ascii(Domain) -> - case catch idna:utf8_to_ascii(Domain) of - {'EXIT', _} -> - false; - AsciiDomain -> - list_to_binary(AsciiDomain) - end. - --spec commands() -> [ejabberd_commands:cmd()]. -commands() -> - [ - #ejabberd_commands{name = incoming_s2s_number, - tags = [stats, s2s], - desc = "Number of incoming s2s connections on the node", - module = stats_api, function = incoming_s2s_number, - args = [], - result = {s2s_incoming, integer}}, - #ejabberd_commands{name = outgoing_s2s_number, - tags = [stats, s2s], - desc = "Number of outgoing s2s connections on the node", - module = stats_api, function = outgoing_s2s_number, - args = [], - result = {s2s_outgoing, integer}} - ]. - -%% Check if host is in blacklist or white list --spec allow_host(fromto()) -> boolean(). -allow_host({FromServer, ToServer}) -> - case mongoose_domain_api:get_host_type(FromServer) of - {error, not_found} -> - false; - {ok, HostType} -> - case mongoose_config:lookup_opt([{s2s, HostType}, host_policy, ToServer]) of - {ok, allow} -> - true; - {ok, deny} -> - false; - {error, not_found} -> - mongoose_config:get_opt([{s2s, HostType}, default_policy]) =:= allow - andalso mongoose_hooks:s2s_allow_host(FromServer, ToServer) =:= allow - end - end. +-spec maybe_start_connection(Pid :: pid(), FromTo :: fromto(), IsRegistered :: boolean()) -> ok. 
+maybe_start_connection(Pid, FromTo, true) -> + ?LOG_INFO(#{what => s2s_new_connection, + text => <<"New s2s connection started">>, + from_to => FromTo, s2s_pid => Pid}), + ejabberd_s2s_out:start_connection(Pid); +maybe_start_connection(Pid, _FromTo, false) -> + ejabberd_s2s_out:stop_connection(Pid). -spec set_shared_secret() -> ok. set_shared_secret() -> @@ -395,47 +249,13 @@ set_shared_secret() -> ok. set_shared_secret(HostType) -> - %% register_secret is replicated across all nodes. - %% So, when starting a node with updated secret in the config, - %% we would replace stored secret on all nodes at once. - %% There could be a small race condition when dialback key checks would get rejected, - %% But there would not be conflicts when some nodes have one secret stored and others - another. - case {get_shared_secret(HostType), get_shared_secret_from_config(HostType)} of - {{error, not_found}, {ok, Secret}} -> - %% Write the secret from the config into Mnesia/CETS for the first time - register_secret(HostType, Secret); - {{error, not_found}, {error, not_found}} -> - %% Write a random secret into Mnesia/CETS for the first time - register_secret(HostType, make_random_secret()); - {{ok, Secret}, {ok, Secret}} -> - %% Config matches Mnesia/CETS - skip_same; - {{ok, _OldSecret}, {ok, NewSecret}} -> - ?LOG_INFO(#{what => overwrite_secret_from_config}), + case mongoose_s2s_lib:check_shared_secret(HostType, get_shared_secret(HostType)) of + {update, NewSecret} -> register_secret(HostType, NewSecret); - {{ok, _OldSecret}, {error, not_found}} -> - %% Keep the secret already stored in Mnesia/CETS - keep_existing - end. - --spec get_shared_secret_from_config(mongooseim:host_type()) -> {ok, base16_secret()} | {error, not_found}. -get_shared_secret_from_config(HostType) -> - mongoose_config:lookup_opt([{s2s, HostType}, shared]). - --spec make_random_secret() -> base16_secret(). -make_random_secret() -> - base16:encode(crypto:strong_rand_bytes(10)). 
- --spec lookup_certfile(mongooseim:host_type()) -> {ok, string()} | {error, not_found}. -lookup_certfile(HostType) -> - case mongoose_config:lookup_opt({domain_certfile, HostType}) of - {ok, CertFile} -> - CertFile; - {error, not_found} -> - mongoose_config:lookup_opt([{s2s, HostType}, certfile]) + ok -> + ok end. - %% Backend logic below: internal_database_init() -> @@ -448,9 +268,9 @@ get_s2s_out_pids(FromTo) -> mongoose_s2s_backend:get_s2s_out_pids(FromTo). %% Returns true if the connection is registered --spec call_try_register(Pid :: pid(), ShouldWriteF :: fun(), FromTo :: fromto()) -> boolean(). -call_try_register(Pid, ShouldWriteF, FromTo) -> - mongoose_s2s_backend:try_register(Pid, ShouldWriteF, FromTo). +-spec call_try_register(Pid :: pid(), FromTo :: fromto()) -> boolean(). +call_try_register(Pid, FromTo) -> + mongoose_s2s_backend:try_register(Pid, FromTo). call_node_cleanup(Node) -> mongoose_s2s_backend:node_cleanup(Node). diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index 6f8e2b8b15a..c6cad99c8b0 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -130,7 +130,7 @@ init([Socket, #{shaper := Shaper, tls := TLSOpts}]) -> ?LOG_DEBUG(#{what => s2s_in_started, text => <<"New incoming S2S connection">>, socket => Socket}), - Timer = erlang:start_timer(ejabberd_s2s:timeout(), self(), []), + Timer = erlang:start_timer(mongoose_s2s_lib:timeout(), self(), []), {ok, wait_for_stream, #state{socket = Socket, streamid = new_id(), @@ -293,7 +293,7 @@ wait_for_feature_request(closed, StateData) -> {stop, normal, StateData}. tls_options_with_certfile(#state{host_type = HostType, tls_options = TLSOptions}) -> - case ejabberd_s2s:lookup_certfile(HostType) of + case mongoose_s2s_lib:lookup_certfile(HostType) of {ok, CertFile} -> TLSOptions#{certfile => CertFile}; {error, not_found} -> TLSOptions end. 
@@ -301,7 +301,7 @@ tls_options_with_certfile(#state{host_type = HostType, tls_options = TLSOptions} -spec stream_established(ejabberd:xml_stream_item(), state()) -> fsm_return(). stream_established({xmlstreamelement, El}, StateData) -> cancel_timer(StateData#state.timer), - Timer = erlang:start_timer(ejabberd_s2s:timeout(), self(), []), + Timer = erlang:start_timer(mongoose_s2s_lib:timeout(), self(), []), case parse_key_packet(El) of %% We use LocalServer and RemoteServer instead of From and To to avoid confusion {db_result, FromTo, Id, Key} -> @@ -309,7 +309,7 @@ stream_established({xmlstreamelement, El}, StateData) -> from_to => FromTo, message_id => Id, key => Key}), %% Checks if the from domain is allowed and if the to %% domain is handled by this server: - case {ejabberd_s2s:allow_host(FromTo), is_local_host_known(FromTo)} of + case {mongoose_s2s_lib:allow_host(FromTo), is_local_host_known(FromTo)} of {true, true} -> ejabberd_s2s_out:terminate_if_waiting_delay(FromTo), StartType = {verify, self(), Key, StateData#state.streamid}, @@ -647,7 +647,7 @@ check_sasl_tls_certveify(false, _) -> check_auth_domain(error, _) -> false; check_auth_domain(AuthDomain, {ok, Cert}) -> - case ejabberd_s2s:domain_utf8_to_ascii(AuthDomain) of + case mongoose_s2s_lib:domain_utf8_to_ascii(AuthDomain) of false -> false; PCAuthDomain -> diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index b8337f4c536..8ef5be8522f 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -34,8 +34,8 @@ -export([start/2, start_link/2, start_connection/1, - terminate_if_waiting_delay/1, - stop_connection/1]). + stop_connection/1, + terminate_if_waiting_delay/1]). %% p1_fsm callbacks (same as gen_fsm) -export([init/1, @@ -150,17 +150,14 @@ start(FromTo, Type) -> supervisor:start_child(ejabberd_s2s_out_sup, [FromTo, Type]). - -spec start_link(ejabberd_s2s:fromto(), _) -> 'ignore' | {'error', _} | {'ok', pid()}. 
start_link(FromTo, Type) -> p1_fsm:start_link(ejabberd_s2s_out, [FromTo, Type], fsm_limit_opts() ++ ?FSMOPTS). - start_connection(Pid) -> p1_fsm:send_event(Pid, init). - stop_connection(Pid) -> p1_fsm:send_event(Pid, closed). @@ -198,7 +195,7 @@ init([{From, Server} = FromTo, Type]) -> start_connection(self()), {false, {Pid, Key, SID}} end, - Timer = erlang:start_timer(ejabberd_s2s:timeout(), self(), []), + Timer = erlang:start_timer(mongoose_s2s_lib:timeout(), self(), []), {ok, open_socket, #state{use_v10 = UseV10, tls = TLS, tls_required = TLSRequired, @@ -656,7 +653,7 @@ handle_info({send_element, Acc, El}, StateName, StateData) -> case StateName of stream_established -> cancel_timer(StateData#state.timer), - Timer = erlang:start_timer(ejabberd_s2s:timeout(), self(), []), + Timer = erlang:start_timer(mongoose_s2s_lib:timeout(), self(), []), send_element(StateData, El), {next_state, StateName, StateData#state{timer = Timer}}; %% In this state we bounce all message: We are waiting before @@ -896,7 +893,7 @@ db_verify_xml({LocalServer, RemoteServer}, Key, Id) -> -spec lookup_services(mongooseim:host_type(), jid:lserver()) -> [addr()]. lookup_services(HostType, Server) -> - case ejabberd_s2s:domain_utf8_to_ascii(Server) of + case mongoose_s2s_lib:domain_utf8_to_ascii(Server) of false -> []; ASCIIAddr -> do_lookup_services(HostType, ASCIIAddr) end. @@ -1147,7 +1144,7 @@ get_acc_with_new_tls(_, _, Acc) -> tls_options(HostType) -> Ciphers = mongoose_config:get_opt([{s2s, HostType}, ciphers]), Options = #{verify_mode => peer, ciphers => Ciphers}, - case ejabberd_s2s:lookup_certfile(HostType) of + case mongoose_s2s_lib:lookup_certfile(HostType) of {ok, CertFile} -> Options#{certfile => CertFile}; {error, not_found} -> Options end. diff --git a/src/s2s/mongoose_s2s_backend.erl b/src/s2s/mongoose_s2s_backend.erl index 7c61cd53f36..6633ce0a137 100644 --- a/src/s2s/mongoose_s2s_backend.erl +++ b/src/s2s/mongoose_s2s_backend.erl @@ -3,7 +3,6 @@ -callback init(map()) -> any(). 
-callback get_s2s_out_pids(ejabberd_s2s:fromto()) -> ejabberd_s2s:s2s_pids(). -callback try_register(Pid :: pid(), - ShouldWriteF :: fun(), FromTo :: ejabberd_s2s:fromto()) -> boolean(). -callback remove_connection(FromTo :: ejabberd_s2s:fromto(), Pid :: pid()) -> ok. -callback node_cleanup(Node :: node()) -> term(). @@ -14,7 +13,7 @@ -export([init/1, get_s2s_out_pids/1, - try_register/3, + try_register/2, remove_connection/2, node_cleanup/1]). @@ -37,10 +36,9 @@ get_s2s_out_pids(FromTo) -> %% Register ejabberd_s2s_out connection -spec try_register(Pid :: pid(), - ShouldWriteF :: fun(), FromTo :: ejabberd_s2s:fromto()) -> boolean(). -try_register(Pid, ShouldWriteF, FromTo) -> - mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [Pid, ShouldWriteF, FromTo]). +try_register(Pid, FromTo) -> + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [Pid, FromTo]). -spec remove_connection(FromTo :: ejabberd_s2s:fromto(), Pid :: pid()) -> ok. remove_connection(FromTo, Pid) -> diff --git a/src/s2s/mongoose_s2s_cets.erl b/src/s2s/mongoose_s2s_cets.erl index 0beff898b06..afbcbe41032 100644 --- a/src/s2s/mongoose_s2s_cets.erl +++ b/src/s2s/mongoose_s2s_cets.erl @@ -3,7 +3,7 @@ -export([init/1, get_s2s_out_pids/1, - try_register/3, + try_register/2, remove_connection/2, node_cleanup/1]). @@ -26,9 +26,9 @@ get_s2s_out_pids(FromTo) -> R = {{FromTo, '$1'}}, ets:select(?TABLE, [{R, [], ['$1']}]). -try_register(Pid, ShouldWriteF, FromTo) -> - L = get_s2s_out_pids(FromTo), - case ShouldWriteF(L) of +try_register(Pid, FromTo) -> + Pids = get_s2s_out_pids(FromTo), + case mongoose_s2s_lib:need_more_connections(FromTo, Pids) of true -> cets:insert(?TABLE, {{FromTo, Pid}}), true; diff --git a/src/s2s/mongoose_s2s_lib.erl b/src/s2s/mongoose_s2s_lib.erl new file mode 100644 index 00000000000..8892dcf304f --- /dev/null +++ b/src/s2s/mongoose_s2s_lib.erl @@ -0,0 +1,217 @@ +%% Library functions without side effects. 
+%% These functions do not change the state of the system or send any messages. +%% These functions do not write into Mnesia/CETS or read from it. +%% They could read the configuration table though. +%% There is one hook `mongoose_hooks:s2s_allow_host', that could cause some side effects +%% (it depends on the hook handlers). +-module(mongoose_s2s_lib). +-export([make_from_to/2, + timeout/0, + domain_utf8_to_ascii/1, + check_shared_secret/2, + lookup_certfile/1, + choose_pid/2, + need_more_connections/2, + needed_extra_connections_number_if_allowed/2, + allow_host/1, + commands/0]). + +-include("mongoose.hrl"). +-include("jlib.hrl"). +-include("ejabberd_commands.hrl"). + +-type fromto() :: ejabberd_s2s:fromto(). +-type s2s_pids() :: ejabberd_s2s:s2s_pids(). + +-define(DEFAULT_MAX_S2S_CONNECTIONS, 1). +-define(DEFAULT_MAX_S2S_CONNECTIONS_PER_NODE, 1). + +-spec make_from_to(From :: jid:jid(), To :: jid:jid()) -> fromto(). +make_from_to(#jid{lserver = FromServer}, #jid{lserver = ToServer}) -> + {FromServer, ToServer}. + +timeout() -> + 600000. + +%% Converts a UTF-8 domain to ASCII (IDNA) +-spec domain_utf8_to_ascii(jid:server()) -> jid:server() | false. +domain_utf8_to_ascii(Domain) -> + case catch idna:utf8_to_ascii(Domain) of + {'EXIT', _} -> + false; + AsciiDomain -> + list_to_binary(AsciiDomain) + end. + +-spec check_shared_secret(HostType, StoredSecretResult) -> ok | {update, NewSecret} when + HostType :: mongooseim:host_type(), + StoredSecretResult :: {ok, ejabberd_s2s:base16_secret()} | {error, not_found}, + NewSecret :: ejabberd_s2s:base16_secret(). +check_shared_secret(HostType, StoredSecretResult) -> + %% register_secret is replicated across all nodes. + %% So, when starting a node with updated secret in the config, + %% we would replace stored secret on all nodes at once. + %% There could be a small race condition when dialback key checks would get rejected, + %% But there would not be conflicts when some nodes have one secret stored and others - another. 
+ case {StoredSecretResult, get_shared_secret_from_config(HostType)} of + {{error, not_found}, {ok, Secret}} -> + %% Write the secret from the config into Mnesia/CETS for the first time + {update, Secret}; + {{error, not_found}, {error, not_found}} -> + %% Write a random secret into Mnesia/CETS for the first time + {update, make_random_secret()}; + {{ok, Secret}, {ok, Secret}} -> + %% Config matches Mnesia/CETS + ok; + {{ok, _OldSecret}, {ok, NewSecret}} -> + ?LOG_INFO(#{what => overwrite_secret_from_config}), + {update, NewSecret}; + {{ok, _OldSecret}, {error, not_found}} -> + %% Keep the secret already stored in Mnesia/CETS + ok + end. + +-spec make_random_secret() -> ejabberd_s2s:base16_secret(). +make_random_secret() -> + base16:encode(crypto:strong_rand_bytes(10)). + +-spec get_shared_secret_from_config(mongooseim:host_type()) -> + {ok, ejabberd_s2s:base16_secret()} | {error, not_found}. +get_shared_secret_from_config(HostType) -> + mongoose_config:lookup_opt([{s2s, HostType}, shared]). + +-spec lookup_certfile(mongooseim:host_type()) -> {ok, string()} | {error, not_found}. +lookup_certfile(HostType) -> + case mongoose_config:lookup_opt({domain_certfile, HostType}) of + {ok, CertFile} -> + CertFile; + {error, not_found} -> + mongoose_config:lookup_opt([{s2s, HostType}, certfile]) + end. + +%% Prefers the local connection (i.e. not on the remote node) +-spec choose_pid(From :: jid:jid(), Pids :: s2s_pids()) -> pid(). +choose_pid(From, [_|_] = Pids) -> + Pids1 = case filter_local_pids(Pids) of + [] -> Pids; + FilteredPids -> FilteredPids + end, + % Use sticky connections based on the JID of the sender + % (without the resource to ensure that a muc room always uses the same connection) + Pid = lists:nth(erlang:phash2(jid:to_bare(From), length(Pids1)) + 1, Pids1), + ?LOG_DEBUG(#{what => s2s_choose_pid, from => From, s2s_pid => Pid}), + Pid. + +%% Returns only pids from the current node. +-spec filter_local_pids(s2s_pids()) -> s2s_pids(). 
+filter_local_pids(Pids) -> + Node = node(), + [Pid || Pid <- Pids, node(Pid) == Node]. + +-spec max_s2s_connections(fromto()) -> pos_integer(). +max_s2s_connections(FromTo) -> + match_integer_acl_rule(FromTo, max_s2s_connections, + ?DEFAULT_MAX_S2S_CONNECTIONS). + +-spec max_s2s_connections_per_node(fromto()) -> pos_integer(). +max_s2s_connections_per_node(FromTo) -> + match_integer_acl_rule(FromTo, max_s2s_connections_per_node, + ?DEFAULT_MAX_S2S_CONNECTIONS_PER_NODE). + +-spec match_integer_acl_rule(fromto(), atom(), integer()) -> term(). +match_integer_acl_rule({FromServer, ToServer}, Rule, Default) -> + {ok, HostType} = mongoose_domain_api:get_host_type(FromServer), + ToServerJid = jid:make(<<>>, ToServer, <<>>), + case acl:match_rule(HostType, Rule, ToServerJid) of + Int when is_integer(Int) -> Int; + _ -> Default + end. + +-spec needed_extra_connections_number_if_allowed(fromto(), s2s_pids()) -> non_neg_integer(). +needed_extra_connections_number_if_allowed(FromTo, OldCons) -> + case is_s2s_allowed_for_host(FromTo, OldCons) of + true -> + needed_extra_connections_number(FromTo, OldCons); + false -> + 0 + end. + +%% Checks: +%% - if the host is not a service +%% - and if the s2s host is not blacklisted or is in whitelist +-spec is_s2s_allowed_for_host(fromto(), _OldConnections :: s2s_pids()) -> boolean(). +is_s2s_allowed_for_host(_FromTo, [_|_]) -> + true; %% Has outgoing connections established, skip the check +is_s2s_allowed_for_host(FromTo, []) -> + not is_service(FromTo) andalso allow_host(FromTo). + +%% Check if host is in blacklist or white list +%% Runs a hook +-spec allow_host(fromto()) -> boolean(). 
+allow_host({FromServer, ToServer}) -> + case mongoose_domain_api:get_host_type(FromServer) of + {error, not_found} -> + false; + {ok, HostType} -> + case mongoose_config:lookup_opt([{s2s, HostType}, host_policy, ToServer]) of + {ok, allow} -> + true; + {ok, deny} -> + false; + {error, not_found} -> + mongoose_config:get_opt([{s2s, HostType}, default_policy]) =:= allow + andalso mongoose_hooks:s2s_allow_host(FromServer, ToServer) =:= allow + end + end. + +-spec need_more_connections(fromto(), s2s_pids()) -> boolean(). +need_more_connections(FromTo, Connections) -> + needed_extra_connections_number(FromTo, Connections) > 0. + +-spec needed_extra_connections_number(fromto(), s2s_pids()) -> non_neg_integer(). +needed_extra_connections_number(FromTo, Connections) -> + MaxConnections = max_s2s_connections(FromTo), + MaxConnectionsPerNode = max_s2s_connections_per_node(FromTo), + LocalPids = filter_local_pids(Connections), + lists:min([MaxConnections - length(Connections), + MaxConnectionsPerNode - length(LocalPids)]). + +%% Returns true if the destination must be considered as a service. +-spec is_service(ejabberd_s2s:fromto()) -> boolean(). +is_service({FromServer, ToServer} = _FromTo) -> + case mongoose_config:lookup_opt({route_subdomains, FromServer}) of + {ok, s2s} -> % bypass RFC 3920 10.3 + false; + {error, not_found} -> + Hosts = ?MYHOSTS, + P = fun(ParentDomain) -> lists:member(ParentDomain, Hosts) end, + lists:any(P, parent_domains(ToServer)) + end. + +-spec parent_domains(jid:lserver()) -> [jid:lserver()]. +parent_domains(Domain) -> + parent_domains(Domain, [Domain]). + +parent_domains(<<>>, Acc) -> + lists:reverse(Acc); +parent_domains(<<$., Rest/binary>>, Acc) -> + parent_domains(Rest, [Rest | Acc]); +parent_domains(<<_, Rest/binary>>, Acc) -> + parent_domains(Rest, Acc). + +-spec commands() -> [ejabberd_commands:cmd()]. 
+commands() -> + [ + #ejabberd_commands{name = incoming_s2s_number, + tags = [stats, s2s], + desc = "Number of incoming s2s connections on the node", + module = stats_api, function = incoming_s2s_number, + args = [], + result = {s2s_incoming, integer}}, + #ejabberd_commands{name = outgoing_s2s_number, + tags = [stats, s2s], + desc = "Number of outgoing s2s connections on the node", + module = stats_api, function = outgoing_s2s_number, + args = [], + result = {s2s_outgoing, integer}} + ]. diff --git a/src/s2s/mongoose_s2s_mnesia.erl b/src/s2s/mongoose_s2s_mnesia.erl index 582c473a67d..73f47930f1a 100644 --- a/src/s2s/mongoose_s2s_mnesia.erl +++ b/src/s2s/mongoose_s2s_mnesia.erl @@ -3,7 +3,7 @@ -export([init/1, get_s2s_out_pids/1, - try_register/3, + try_register/2, remove_connection/2, node_cleanup/1]). @@ -33,10 +33,10 @@ init_pids() -> get_s2s_out_pids(FromTo) -> s2s_to_pids(mnesia:dirty_read(s2s, FromTo)). -try_register(Pid, ShouldWriteF, FromTo) -> +try_register(Pid, FromTo) -> F = fun() -> - L = get_s2s_out_pids(FromTo), - case ShouldWriteF(L) of + Pids = get_s2s_out_pids(FromTo), + case mongoose_s2s_lib:need_more_connections(FromTo, Pids) of true -> mnesia:write(#s2s{fromto = FromTo, pid = Pid}), true; From e4e33838fa235e8e7b19e4e1b5bcc7d82aa3ca24 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 09:18:15 +0200 Subject: [PATCH 088/161] Remove SUPERVISOR_START macro --- src/ejabberd_s2s.erl | 2 +- src/ejabberd_s2s_in.erl | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index 0afe4ab4794..0e8f8c2ba05 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -283,6 +283,6 @@ remove_connection(FromTo, Pid) -> get_shared_secret(HostType) -> mongoose_s2s_backend:get_shared_secret(HostType). --spec register_secret(mongooseim:host_type(), ejabberd_s2s:base16_secret()) -> ok. +-spec register_secret(mongooseim:host_type(), base16_secret()) -> ok. 
register_secret(HostType, Secret) -> mongoose_s2s_backend:register_secret(HostType, Secret). diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index c6cad99c8b0..281492eda7c 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -83,9 +83,6 @@ -define(FSMOPTS, []). -endif. --define(SUPERVISOR_START, supervisor:start_child(ejabberd_s2s_in_sup, - [Socket, Opts])). - -define(STREAM_HEADER(Version), (<<"" " {error, _} | {ok, undefined | pid()} | {ok, undefined | pid(), _}. start(Socket, Opts) -> - ?SUPERVISOR_START. + supervisor:start_child(ejabberd_s2s_in_sup, [Socket, Opts]). -spec start_link(socket(), options()) -> ignore | {error, _} | {ok, pid()}. start_link(Socket, Opts) -> From 9bf41794efcc8f5e927da6c3fd71021ac302ab64 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 09:24:14 +0200 Subject: [PATCH 089/161] Add missing specs --- src/s2s/mongoose_s2s_backend.erl | 4 ++-- src/s2s/mongoose_s2s_cets.erl | 12 +++++++++++- src/s2s/mongoose_s2s_mnesia.erl | 12 +++++++++++- 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/src/s2s/mongoose_s2s_backend.erl b/src/s2s/mongoose_s2s_backend.erl index 6633ce0a137..30bd9661fe8 100644 --- a/src/s2s/mongoose_s2s_backend.erl +++ b/src/s2s/mongoose_s2s_backend.erl @@ -30,7 +30,7 @@ init(Opts) -> mongoose_backend:init(global, ?MAIN_MODULE, [], Opts), mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). --spec get_s2s_out_pids(ejabberd_s2s:fromto()) -> [pid()]. +-spec get_s2s_out_pids(ejabberd_s2s:fromto()) -> ejabberd_s2s:s2s_pids(). get_s2s_out_pids(FromTo) -> mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [FromTo]). @@ -44,7 +44,7 @@ try_register(Pid, FromTo) -> remove_connection(FromTo, Pid) -> mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [FromTo, Pid]). --spec node_cleanup(Node :: node()) -> term(). +-spec node_cleanup(Node :: node()) -> ok. 
node_cleanup(Node) -> mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [Node]). diff --git a/src/s2s/mongoose_s2s_cets.erl b/src/s2s/mongoose_s2s_cets.erl index afbcbe41032..80810330726 100644 --- a/src/s2s/mongoose_s2s_cets.erl +++ b/src/s2s/mongoose_s2s_cets.erl @@ -22,10 +22,13 @@ init(_) -> cets_discovery:add_table(mongoose_cets_discovery, ?SECRET_TABLE). %% Pid lists +-spec get_s2s_out_pids(ejabberd_s2s:fromto()) -> ejabberd_s2s:s2s_pids(). get_s2s_out_pids(FromTo) -> R = {{FromTo, '$1'}}, ets:select(?TABLE, [{R, [], ['$1']}]). +-spec try_register(Pid :: pid(), + FromTo :: ejabberd_s2s:fromto()) -> boolean(). try_register(Pid, FromTo) -> Pids = get_s2s_out_pids(FromTo), case mongoose_s2s_lib:need_more_connections(FromTo, Pids) of @@ -36,22 +39,29 @@ try_register(Pid, FromTo) -> false end. +-spec remove_connection(FromTo :: ejabberd_s2s:fromto(), Pid :: pid()) -> ok. remove_connection(FromTo, Pid) -> cets:delete(?TABLE, {FromTo, Pid}), ok. %% node_cleanup is called on each node in the cluster, when Node is down +-spec node_cleanup(Node :: node()) -> ok. node_cleanup(Node) -> KeyPattern = {'_', '$1'}, R = {KeyPattern}, Guard = {'==', {node, '$1'}, Node}, - ets:select_delete(?TABLE, [{R, [Guard], [true]}]). + ets:select_delete(?TABLE, [{R, [Guard], [true]}]), + ok. %% Secrets +-spec register_secret(HostType :: mongooseim:host_type(), + Secret :: ejabberd_s2s:base16_secret()) -> ok. register_secret(HostType, Secret) -> cets:insert(?SECRET_TABLE, {HostType, Secret}), ok. +-spec get_shared_secret(mongooseim:host_type()) -> + {ok, ejabberd_s2s:base16_secret()} | {error, not_found}. 
get_shared_secret(HostType) -> case ets:lookup(?SECRET_TABLE, HostType) of [{_HostType, Secret}] -> diff --git a/src/s2s/mongoose_s2s_mnesia.erl b/src/s2s/mongoose_s2s_mnesia.erl index 73f47930f1a..1c3114e2936 100644 --- a/src/s2s/mongoose_s2s_mnesia.erl +++ b/src/s2s/mongoose_s2s_mnesia.erl @@ -30,9 +30,12 @@ init_pids() -> mnesia:create_table(s2s, Opts), mnesia:add_table_copy(s2s, node(), ram_copies). +-spec get_s2s_out_pids(ejabberd_s2s:fromto()) -> ejabberd_s2s:s2s_pids(). get_s2s_out_pids(FromTo) -> s2s_to_pids(mnesia:dirty_read(s2s, FromTo)). +-spec try_register(Pid :: pid(), + FromTo :: ejabberd_s2s:fromto()) -> boolean(). try_register(Pid, FromTo) -> F = fun() -> Pids = get_s2s_out_pids(FromTo), @@ -54,6 +57,7 @@ try_register(Pid, FromTo) -> false end. +-spec remove_connection(FromTo :: ejabberd_s2s:fromto(), Pid :: pid()) -> ok. remove_connection(FromTo, Pid) -> Rec = #s2s{fromto = FromTo, pid = Pid}, F = fun() -> @@ -68,6 +72,7 @@ remove_connection(FromTo, Pid) -> ok end. +-spec node_cleanup(Node :: node()) -> ok. node_cleanup(Node) -> F = fun() -> Es = mnesia:select( @@ -79,7 +84,8 @@ node_cleanup(Node) -> mnesia:delete_object(E) end, Es) end, - mnesia:async_dirty(F). + mnesia:async_dirty(F), + ok. s2s_to_pids(List) -> [Pid || #s2s{pid = Pid} <- List]. @@ -90,11 +96,15 @@ init_secrets() -> mnesia:create_table(s2s_secret, Opts), mnesia:add_table_copy(s2s_secret, node(), ram_copies). +-spec register_secret(HostType :: mongooseim:host_type(), + Secret :: ejabberd_s2s:base16_secret()) -> ok. register_secret(HostType, Secret) -> Rec = #s2s_secret{host_type = HostType, secret = Secret}, {atomic, _} = mnesia:transaction(fun() -> mnesia:write(Rec) end), ok. +-spec get_shared_secret(mongooseim:host_type()) -> + {ok, ejabberd_s2s:base16_secret()} | {error, not_found}. 
 get_shared_secret(HostType) ->
     case mnesia:dirty_read(s2s_secret, HostType) of
         [#s2s_secret{secret = Secret}] ->

From 16d18299b14cfb24b426a50df4052af9cf10aaea Mon Sep 17 00:00:00 2001
From: Mikhail Uvarov
Date: Fri, 7 Jul 2023 14:05:54 +0200
Subject: [PATCH 090/161] Rename Mnesia table back to s2s_shared

---
 src/s2s/mongoose_s2s_mnesia.erl | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/src/s2s/mongoose_s2s_mnesia.erl b/src/s2s/mongoose_s2s_mnesia.erl
index 1c3114e2936..5a90192af14 100644
--- a/src/s2s/mongoose_s2s_mnesia.erl
+++ b/src/s2s/mongoose_s2s_mnesia.erl
@@ -11,11 +11,14 @@
          get_shared_secret/1]).
 
 -record(s2s, {
-        fromto :: ejabberd_s2s:fromto() | '_',
-        pid :: pid() | '$1'
-       }).
+          fromto :: ejabberd_s2s:fromto() | '_',
+          pid :: pid() | '$1'
+         }).
 
--record(s2s_secret, {host_type, secret}).
+-record(s2s_shared, {
+          host_type :: mongooseim:host_type(),
+          secret :: ejabberd_s2s:base16_secret()
+         }).
 
 -include("mongoose_logger.hrl").
 
@@ -92,22 +95,22 @@ s2s_to_pids(List) ->
 %% Secrets
 
 init_secrets() ->
-    Opts = [{ram_copies, [node()]}, {attributes, record_info(fields, s2s_secret)}],
-    mnesia:create_table(s2s_secret, Opts),
-    mnesia:add_table_copy(s2s_secret, node(), ram_copies).
+    Opts = [{ram_copies, [node()]}, {attributes, record_info(fields, s2s_shared)}],
+    mnesia:create_table(s2s_shared, Opts),
+    mnesia:add_table_copy(s2s_shared, node(), ram_copies).
 
 -spec register_secret(HostType :: mongooseim:host_type(),
                       Secret :: ejabberd_s2s:base16_secret()) -> ok.
 register_secret(HostType, Secret) ->
-    Rec = #s2s_secret{host_type = HostType, secret = Secret},
+    Rec = #s2s_shared{host_type = HostType, secret = Secret},
     {atomic, _} = mnesia:transaction(fun() -> mnesia:write(Rec) end),
     ok.
 
 -spec get_shared_secret(mongooseim:host_type()) ->
     {ok, ejabberd_s2s:base16_secret()} | {error, not_found}.
get_shared_secret(HostType) -> - case mnesia:dirty_read(s2s_secret, HostType) of - [#s2s_secret{secret = Secret}] -> + case mnesia:dirty_read(s2s_shared, HostType) of + [#s2s_shared{secret = Secret}] -> {ok, Secret}; [] -> {error, not_found} From 7f29448014181ef6e7517c181574a3fa1b0b047b Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 14:22:09 +0200 Subject: [PATCH 091/161] Add conflict resolver for secrets in mongoose_s2s_cets --- rebar.config | 2 +- src/s2s/mongoose_s2s_cets.erl | 28 +++++++++++++++++++++++++--- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/rebar.config b/rebar.config index 080e81f6c49..ac88130d5fe 100644 --- a/rebar.config +++ b/rebar.config @@ -80,7 +80,7 @@ {cache_tab, "1.0.30"}, {segmented_cache, "0.3.0"}, {worker_pool, "6.0.1"}, - {cets, {git, "https://github.com/esl/cets.git", {branch, "main"}}}, + {cets, {git, "https://github.com/esl/cets.git", {branch, "mu-conflict-handler"}}}, %%% HTTP tools {graphql, {git, "https://github.com/esl/graphql-erlang.git", {branch, "master"}}}, diff --git a/src/s2s/mongoose_s2s_cets.erl b/src/s2s/mongoose_s2s_cets.erl index 80810330726..60b20ccd4f6 100644 --- a/src/s2s/mongoose_s2s_cets.erl +++ b/src/s2s/mongoose_s2s_cets.erl @@ -10,6 +10,9 @@ -export([register_secret/2, get_shared_secret/1]). +%% Internal usage (export so the callback would survive multiple code reloads) +-export([handle_secret_conflict/2]). + -include("mongoose_logger.hrl"). -define(TABLE, cets_s2s_session). @@ -17,10 +20,25 @@ init(_) -> cets:start(?TABLE, #{}), - cets:start(?SECRET_TABLE, #{}), + %% Non-random, non-node-specific keys + %% This means that default merging would not work + cets:start(?SECRET_TABLE, #{handle_conflict => fun ?MODULE:handle_secret_conflict/2}), cets_discovery:add_table(mongoose_cets_discovery, ?TABLE), cets_discovery:add_table(mongoose_cets_discovery, ?SECRET_TABLE). +%% Store the most recent value: +%% - first element of the tuple is the same and it is the key. 
+%% - second element is a timestamp, so comparing tuples works. +%% Even if we choose the wrong record - nothing bad would happen +%% (we still need to choose one). +%% Choosing the record with the highest timestamp is just a logical behaviour +%% (it also matches the logic of mongoose_s2s_lib:check_shared_secret/2, where updated secret +%% in the config is updated across all nodes in the cluster). +handle_secret_conflict(Rec1, Rec2) when Rec1 > Rec2 -> + Rec1; +handle_secret_conflict(_Rec1, Rec2) -> + Rec2. + %% Pid lists -spec get_s2s_out_pids(ejabberd_s2s:fromto()) -> ejabberd_s2s:s2s_pids(). get_s2s_out_pids(FromTo) -> @@ -57,14 +75,18 @@ node_cleanup(Node) -> -spec register_secret(HostType :: mongooseim:host_type(), Secret :: ejabberd_s2s:base16_secret()) -> ok. register_secret(HostType, Secret) -> - cets:insert(?SECRET_TABLE, {HostType, Secret}), + %% We store timestamp so we could use it when merging two tables when clustering. + %% Secrets is a very small table and get_shared_secret is called rarely, + %% so having an extra field is not a problem. + TS = erlang:system_time(microsecond), + cets:insert(?SECRET_TABLE, {HostType, TS, Secret}), ok. -spec get_shared_secret(mongooseim:host_type()) -> {ok, ejabberd_s2s:base16_secret()} | {error, not_found}. 
get_shared_secret(HostType) -> case ets:lookup(?SECRET_TABLE, HostType) of - [{_HostType, Secret}] -> + [{_HostType, _TS, Secret}] -> {ok, Secret}; [] -> {error, not_found} From 933b7091ebbe1c50df872f177b384df66c32b621 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 14:43:48 +0200 Subject: [PATCH 092/161] Improve specs and code style --- src/ejabberd_s2s.erl | 22 +++++++++------------- src/s2s/mongoose_s2s_backend.erl | 4 ++-- src/s2s/mongoose_s2s_cets.erl | 4 +++- src/s2s/mongoose_s2s_mnesia.erl | 4 +++- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index 0e8f8c2ba05..6d23b47ea6c 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -31,13 +31,13 @@ -behaviour(gen_server). -behaviour(xmpp_router). -%% API +%% API functions -export([start_link/0, filter/4, route/4, key/3, - get_s2s_out_pids/1, try_register/1, + get_s2s_out_pids/1, remove_connection/2]). %% Hooks callbacks @@ -67,9 +67,7 @@ -export_type([fromto/0, s2s_pids/0, base16_secret/0]). -%%==================================================================== -%% API -%%==================================================================== +%% API functions %% Starts the server -spec start_link() -> ignore | {error, _} | {ok, pid()}. @@ -107,18 +105,14 @@ key(HostType, {From, To}, StreamID) -> HMac = crypto:mac(hmac, sha256, SecretHashed, [From, " ", To, " ", StreamID]), base16:encode(HMac). -%%==================================================================== %% Hooks callbacks -%%==================================================================== -spec node_cleanup(map(), map(), map()) -> {ok, map()}. node_cleanup(Acc, #{node := Node}, _) -> Res = call_node_cleanup(Node), {ok, maps:put(?MODULE, Res, Acc)}. 
-%%====================================================================
 %% gen_server callbacks
-%%====================================================================
 
 init([]) ->
     internal_database_init(),
@@ -147,9 +141,7 @@ terminate(_Reason, _State) ->
 code_change(_OldVsn, State, _Extra) ->
     {ok, State}.
 
-%%--------------------------------------------------------------------
 %%% Internal functions
-%%--------------------------------------------------------------------
 -spec hooks() -> [gen_hook:hook_tuple()].
 hooks() ->
     [{node_cleanup, global, fun ?MODULE:node_cleanup/3, #{}, 50}].
@@ -208,7 +200,8 @@ find_connection(From, To) ->
 %% Returns an updated list of connections.
 -spec ensure_enough_connections(fromto(), s2s_pids()) -> s2s_pids().
 ensure_enough_connections(FromTo, OldCons) ->
-    NeededConnections = mongoose_s2s_lib:needed_extra_connections_number_if_allowed(FromTo, OldCons),
+    NeededConnections =
+        mongoose_s2s_lib:needed_extra_connections_number_if_allowed(FromTo, OldCons),
     case NeededConnections of
         0 ->
             OldCons;
@@ -248,6 +241,7 @@ set_shared_secret() ->
     [set_shared_secret(HostType) || HostType <- ?ALL_HOST_TYPES],
     ok.
 
+%% Updates the secret across the cluster if needed
 set_shared_secret(HostType) ->
     case mongoose_s2s_lib:check_shared_secret(HostType, get_shared_secret(HostType)) of
         {update, NewSecret} ->
@@ -256,8 +250,9 @@ set_shared_secret(HostType) ->
             ok
     end.
 
-%% Backend logic below:
+%% Backend logic functions
 
+-spec internal_database_init() -> ok.
 internal_database_init() ->
     Backend = mongoose_config:get_opt(s2s_backend),
     mongoose_s2s_backend:init(#{backend => Backend}).
@@ -272,6 +267,7 @@ get_s2s_out_pids(FromTo) ->
 call_try_register(Pid, FromTo) ->
     mongoose_s2s_backend:try_register(Pid, FromTo).
 
+-spec call_node_cleanup(Node :: node()) -> ok.
call_node_cleanup(Node) ->
     mongoose_s2s_backend:node_cleanup(Node).
diff --git a/src/s2s/mongoose_s2s_backend.erl b/src/s2s/mongoose_s2s_backend.erl index 30bd9661fe8..20c3982eefe 100644 --- a/src/s2s/mongoose_s2s_backend.erl +++ b/src/s2s/mongoose_s2s_backend.erl @@ -1,6 +1,6 @@ -module(mongoose_s2s_backend). --callback init(map()) -> any(). +-callback init(map()) -> ok. -callback get_s2s_out_pids(ejabberd_s2s:fromto()) -> ejabberd_s2s:s2s_pids(). -callback try_register(Pid :: pid(), FromTo :: ejabberd_s2s:fromto()) -> boolean(). @@ -24,7 +24,7 @@ -define(MAIN_MODULE, mongoose_s2s). --spec init(map()) -> any(). +-spec init(map()) -> ok. init(Opts) -> Args = [Opts], mongoose_backend:init(global, ?MAIN_MODULE, [], Opts), diff --git a/src/s2s/mongoose_s2s_cets.erl b/src/s2s/mongoose_s2s_cets.erl index 60b20ccd4f6..536a4fc6dca 100644 --- a/src/s2s/mongoose_s2s_cets.erl +++ b/src/s2s/mongoose_s2s_cets.erl @@ -18,13 +18,15 @@ -define(TABLE, cets_s2s_session). -define(SECRET_TABLE, cets_s2s_secret). +-spec init(map()) -> ok. init(_) -> cets:start(?TABLE, #{}), %% Non-random, non-node-specific keys %% This means that default merging would not work cets:start(?SECRET_TABLE, #{handle_conflict => fun ?MODULE:handle_secret_conflict/2}), cets_discovery:add_table(mongoose_cets_discovery, ?TABLE), - cets_discovery:add_table(mongoose_cets_discovery, ?SECRET_TABLE). + cets_discovery:add_table(mongoose_cets_discovery, ?SECRET_TABLE), + ok. %% Store the most recent value: %% - first element of the tuple is the same and it is the key. diff --git a/src/s2s/mongoose_s2s_mnesia.erl b/src/s2s/mongoose_s2s_mnesia.erl index 5a90192af14..1d804815e9a 100644 --- a/src/s2s/mongoose_s2s_mnesia.erl +++ b/src/s2s/mongoose_s2s_mnesia.erl @@ -22,9 +22,11 @@ -include("mongoose_logger.hrl"). +-spec init(map()) -> ok. init(_) -> init_pids(), - init_secrets(). + init_secrets(), + ok. 
%% Pid lists init_pids() -> From b34821d5d9f9ffe79f518dffec61aae5ebf4dc17 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 15:46:12 +0200 Subject: [PATCH 093/161] Play with dialback function names --- src/ejabberd_s2s_in.erl | 47 +++++++++++++++++++++++----------------- src/ejabberd_s2s_out.erl | 32 +++++++++++++-------------- 2 files changed, 43 insertions(+), 36 deletions(-) diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index 281492eda7c..6c445e9873c 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -299,9 +299,11 @@ tls_options_with_certfile(#state{host_type = HostType, tls_options = TLSOptions} stream_established({xmlstreamelement, El}, StateData) -> cancel_timer(StateData#state.timer), Timer = erlang:start_timer(mongoose_s2s_lib:timeout(), self(), []), - case parse_key_packet(El) of - %% We use LocalServer and RemoteServer instead of From and To to avoid confusion - {db_result, FromTo, Id, Key} -> + case parse_dialback_with_key(El) of + %% Incoming dialback key, we have to verify it using ejabberd_s2s_out before + %% accepting any incoming stanzas + %% (we have to receive an event `{valid, FromTo}' from `ejabberd_s2s_out' process). + {dialback_result, FromTo, Id, Key} -> ?LOG_DEBUG(#{what => s2s_in_get_key, from_to => FromTo, message_id => Id, key => Key}), %% Checks if the from domain is allowed and if the to @@ -310,6 +312,8 @@ stream_established({xmlstreamelement, El}, StateData) -> {true, true} -> ejabberd_s2s_out:terminate_if_waiting_delay(FromTo), StartType = {verify, self(), Key, StateData#state.streamid}, + %% Could we reuse an existing ejabberd_s2s_out connection + %% instead of making a new one? 
ejabberd_s2s_out:start(FromTo, StartType), Conns = maps:put(FromTo, wait_for_verification, StateData#state.connections), @@ -326,14 +330,16 @@ stream_established({xmlstreamelement, El}, StateData) -> ?LOG_WARNING(#{what => s2s_in_key_with_invalid_from}), {stop, normal, StateData} end; - {db_verify, FromTo, Id, Key} -> + %% Incoming dialback verification request + %% We have to check it using secrets and reply if it is valid or not + {dialback_verify, FromTo, Id, Key} -> ?LOG_DEBUG(#{what => s2s_in_verify_key, from_to => FromTo, message_id => Id, key => Key}), Type = case ejabberd_s2s:key(StateData#state.host_type, FromTo, Id) of Key -> <<"valid">>; _ -> <<"invalid">> end, - send_element(StateData, db_verify_xml(FromTo, Id, Type)), + send_element(StateData, dialback_verify_type_xml(FromTo, Id, Type)), {next_state, stream_established, StateData#state{timer = Timer}}; false -> Res = parse_and_route_incoming_stanza(El, StateData), @@ -342,13 +348,13 @@ stream_established({xmlstreamelement, El}, StateData) -> end; %% An event from ejabberd_s2s_out stream_established({valid, FromTo}, StateData) -> - send_element(StateData, db_result_xml(FromTo, <<"valid">>)), + send_element(StateData, dialback_result_type_xml(FromTo, <<"valid">>)), Cons = maps:put(FromTo, established, StateData#state.connections), NSD = StateData#state{connections = Cons}, {next_state, stream_established, NSD}; %% An event from ejabberd_s2s_out stream_established({invalid, FromTo}, StateData) -> - send_element(StateData, db_result_xml(FromTo, <<"invalid">>)), + send_element(StateData, dialback_result_type_xml(FromTo, <<"invalid">>)), Cons = maps:remove(FromTo, StateData#state.connections), NSD = StateData#state{connections = Cons}, {next_state, stream_established, NSD}; @@ -417,7 +423,7 @@ same_auth_domain({_, LRemoteServer}, #state{auth_domain = AuthDomain}) -> -spec is_s2s_connected(ejabberd_s2s:fromto(), #state{}) -> boolean(). 
is_s2s_connected(FromTo, StateData) -> - {ok, established} =:= maps:find(FromTo, StateData#state.connections). + established =:= maps:get(FromTo, StateData#state.connections, false). -spec is_valid_stanza(exml:element()) -> boolean(). is_valid_stanza(#xmlel{name = Name}) -> @@ -557,34 +563,35 @@ cancel_timer(Timer) -> %% XEP-0185: Dialback Key Generation and Validation %% DB means dial-back %% Receiving Server is Informed by Authoritative Server that Key is Valid or Invalid (Step 3) --spec db_verify_xml(ejabberd_s2s:fromto(), binary(), binary()) -> exml:element(). -db_verify_xml({LocalServer, RemoteServer}, Id, Type) -> +-spec dialback_verify_type_xml(ejabberd_s2s:fromto(), binary(), binary()) -> exml:element(). +dialback_verify_type_xml({LocalServer, RemoteServer}, Id, Type) -> #xmlel{name = <<"db:verify">>, attrs = [{<<"from">>, LocalServer}, {<<"to">>, RemoteServer}, {<<"id">>, Id}, {<<"type">>, Type}]}. --spec db_result_xml(ejabberd_s2s:fromto(), binary()) -> exml:element(). -db_result_xml({LocalServer, RemoteServer}, Type) -> +-spec dialback_result_type_xml(ejabberd_s2s:fromto(), binary()) -> exml:element(). +dialback_result_type_xml({LocalServer, RemoteServer}, Type) -> %% Receiving Server Sends Valid or Invalid Verification Result to Initiating Server (Step 4) #xmlel{name = <<"db:result">>, attrs = [{<<"from">>, LocalServer}, {<<"to">>, RemoteServer}, {<<"type">>, Type}]}. --spec parse_key_packet(exml:element()) -> false - | {db_result | db_verify, FromTo :: ejabberd_s2s:fromto(), Id :: binary(), Key :: binary()}. -parse_key_packet(El = #xmlel{name = <<"db:result">>}) -> +-spec parse_dialback_with_key(exml:element()) -> false + | {dialback_result | dialback_verify, + FromTo :: ejabberd_s2s:fromto(), Id :: binary(), Key :: binary()}. 
+parse_dialback_with_key(El = #xmlel{name = <<"db:result">>}) -> %% Initiating Server Sends Dialback Key (Step 1) - parsed_key_packet(db_result, El); -parse_key_packet(El = #xmlel{name = <<"db:verify">>}) -> + parsed_dialback_with_key(dialback_result, El); +parse_dialback_with_key(El = #xmlel{name = <<"db:verify">>}) -> %% Receiving Server Sends Verification Request to Authoritative Server (Step 2) - parsed_key_packet(db_verify, El); -parse_key_packet(_) -> + parsed_dialback_with_key(dialback_verify, El); +parse_dialback_with_key(_) -> false. -parsed_key_packet(Type, El) -> +parsed_dialback_with_key(Type, El) -> FromTo = parse_from_to(El), Id = exml_query:attr(El, <<"id">>, <<>>), Key = exml_query:cdata(El), diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index 8ef5be8522f..ca9b8a94801 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -327,7 +327,7 @@ wait_for_stream(closed, StateData) -> -spec wait_for_validation(ejabberd:xml_stream_item(), state()) -> fsm_return(). wait_for_validation({xmlstreamelement, El}, StateData) -> - case parse_verify_result(El) of + case parse_dialback_with_type(El) of {db_result, To, From, Id, Type} -> ?LOG_DEBUG(#{what => s2s_receive_result, from => From, to => To, message_id => Id, type => Type}), @@ -354,14 +354,12 @@ wait_for_validation({xmlstreamelement, El}, StateData) -> from => From, to => To, message_id => Id, type => Type}), case StateData#state.verify of false -> - NextState = wait_for_validation, %% TODO: Should'nt we close the connection here ? 
- {next_state, NextState, StateData, + {next_state, wait_for_validation, StateData, get_timeout_interval(NextState)}; {Pid, _Key, _SID} -> send_event_to_s2s_in(Type, Pid, StateData), - NextState = wait_for_validation, - {next_state, NextState, StateData, + {next_state, wait_for_validation, StateData, get_timeout_interval(NextState)} end; @@ -542,7 +540,7 @@ wait_before_retry(_Event, StateData) -> stream_established({xmlstreamelement, El}, StateData) -> ?LOG_DEBUG(#{what => s2s_out_stream_established, exml_packet => El, myname => StateData#state.myname, server => StateData#state.server}), - case parse_verify_result(El) of + case parse_dialback_with_type(El) of {db_verify, VTo, VFrom, VId, VType} -> ?LOG_DEBUG(#{what => s2s_recv_verify, to => VTo, from => VFrom, message_id => VId, type => VType, @@ -831,13 +829,13 @@ send_db_request(StateData) -> StateData#state.remote_streamid), %% Initiating Server Sends Dialback Key %% https://xmpp.org/extensions/xep-0220.html#example-1 - send_element(StateData, db_result_xml(StateData#state.from_to, Key1)) + send_element(StateData, db_result_key_xml(StateData#state.from_to, Key1)) end, case StateData#state.verify of false -> ok; {_Pid, Key2, SID} -> - send_element(StateData, db_verify_xml(StateData#state.from_to, Key2, SID)) + send_element(StateData, db_verify_key_xml(StateData#state.from_to, Key2, SID)) end, {next_state, wait_for_validation, NewStateData, ?FSMTIMEOUT*6} catch @@ -849,36 +847,38 @@ send_db_request(StateData) -> end. --spec parse_verify_result(exml:element()) -> false +%% Parse dialback verification result. +%% Verification result is stored in the `type' attribute and could be `valid' or `invalid'. +-spec parse_dialback_with_type(exml:element()) -> false | {db_verify | db_result, To :: binary(), From :: binary(), Id :: binary(), Type :: binary()}. 
-parse_verify_result(#xmlel{name = <<"db:result">>, attrs = Attrs}) -> +parse_dialback_with_type(#xmlel{name = <<"db:result">>, attrs = Attrs}) -> %% Receiving Server Sends Valid or Invalid Verification Result to Initiating Server (Step 4) {db_result, xml:get_attr_s(<<"to">>, Attrs), xml:get_attr_s(<<"from">>, Attrs), xml:get_attr_s(<<"id">>, Attrs), xml:get_attr_s(<<"type">>, Attrs)}; -parse_verify_result(#xmlel{name = <<"db:verify">>, attrs = Attrs}) -> +parse_dialback_with_type(#xmlel{name = <<"db:verify">>, attrs = Attrs}) -> %% Receiving Server is Informed by Authoritative Server that Key is Valid or Invalid (Step 3) {db_verify, xml:get_attr_s(<<"to">>, Attrs), xml:get_attr_s(<<"from">>, Attrs), xml:get_attr_s(<<"id">>, Attrs), xml:get_attr_s(<<"type">>, Attrs)}; -parse_verify_result(_) -> +parse_dialback_with_type(_) -> false. %% Initiating Server Sends Dialback Key (Step 1) --spec db_result_xml(ejabberd_s2s:fromto(), binary()) -> exml:element(). -db_result_xml({LocalServer, RemoteServer}, Key) -> +-spec db_result_key_xml(ejabberd_s2s:fromto(), binary()) -> exml:element(). +db_result_key_xml({LocalServer, RemoteServer}, Key) -> #xmlel{name = <<"db:result">>, attrs = [{<<"from">>, LocalServer}, {<<"to">>, RemoteServer}], children = [#xmlcdata{content = Key}]}. %% Receiving Server Sends Verification Request to Authoritative Server (Step 2) --spec db_verify_xml(ejabberd_s2s:fromto(), binary(), binary()) -> exml:element(). -db_verify_xml({LocalServer, RemoteServer}, Key, Id) -> +-spec db_verify_key_xml(ejabberd_s2s:fromto(), binary(), binary()) -> exml:element(). 
+db_verify_key_xml({LocalServer, RemoteServer}, Key, Id) -> #xmlel{name = <<"db:verify">>, attrs = [{<<"from">>, LocalServer}, {<<"to">>, RemoteServer}, From 69f8b2aadbef1acf6fabead6e0febb232e0be927 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 15:48:46 +0200 Subject: [PATCH 094/161] Replace db_ with dialback --- src/ejabberd_s2s_out.erl | 46 ++++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index ca9b8a94801..25de2bebb37 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -73,7 +73,7 @@ tls_enabled = false :: boolean(), tls_options :: mongoose_tls:options(), authenticated = false :: boolean(), - db_enabled = true :: boolean(), + dialback_enabled = true :: boolean(), try_auth = true :: boolean(), from_to :: ejabberd_s2s:fromto(), myname, server, queue, @@ -292,23 +292,23 @@ wait_for_stream({xmlstreamstart, _Name, Attrs}, StateData0) -> xml:get_attr_s(<<"xmlns:db">>, Attrs), xml:get_attr_s(<<"version">>, Attrs) == <<"1.0">>} of {<<"jabber:server">>, <<"jabber:server:dialback">>, false} -> - send_db_request(StateData); + send_dialback_request(StateData); {<<"jabber:server">>, <<"jabber:server:dialback">>, true} when StateData#state.use_v10 -> {next_state, wait_for_features, StateData, ?FSMTIMEOUT}; %% Clause added to handle Tigase's workaround for an old ejabberd bug: {<<"jabber:server">>, <<"jabber:server:dialback">>, true} when not StateData#state.use_v10 -> - send_db_request(StateData); + send_dialback_request(StateData); {<<"jabber:server">>, <<"">>, true} when StateData#state.use_v10 -> - {next_state, wait_for_features, StateData#state{db_enabled = false}, ?FSMTIMEOUT}; + {next_state, wait_for_features, StateData#state{dialback_enabled = false}, ?FSMTIMEOUT}; {NSProvided, DB, _} -> send_element(StateData, mongoose_xmpp_errors:invalid_namespace()), ?LOG_INFO(#{what => s2s_out_closing, text => <<"Closing s2s 
connection: (invalid namespace)">>, namespace_provided => NSProvided, namespace_expected => <<"jabber:server">>, - xmlnsdb_provided => DB, + xmlnsdialback_provided => DB, all_attributes => Attrs, myname => StateData#state.myname, server => StateData#state.server}), {stop, normal, StateData} @@ -328,7 +328,7 @@ wait_for_stream(closed, StateData) -> -spec wait_for_validation(ejabberd:xml_stream_item(), state()) -> fsm_return(). wait_for_validation({xmlstreamelement, El}, StateData) -> case parse_dialback_with_type(El) of - {db_result, To, From, Id, Type} -> + {dialback_result, To, From, Id, Type} -> ?LOG_DEBUG(#{what => s2s_receive_result, from => From, to => To, message_id => Id, type => Type}), case {Type, StateData#state.tls_enabled, StateData#state.tls_required} of @@ -349,7 +349,7 @@ wait_for_validation({xmlstreamelement, El}, StateData) -> %% TODO: bounce packets ?CLOSE_GENERIC(wait_for_validation, invalid_dialback_key, El, StateData) end; - {db_verify, To, From, Id, Type} -> + {dialback_verify, To, From, Id, Type} -> ?LOG_DEBUG(#{what => s2s_receive_verify, from => From, to => To, message_id => Id, type => Type}), case StateData#state.verify of @@ -541,7 +541,7 @@ stream_established({xmlstreamelement, El}, StateData) -> ?LOG_DEBUG(#{what => s2s_out_stream_established, exml_packet => El, myname => StateData#state.myname, server => StateData#state.server}), case parse_dialback_with_type(El) of - {db_verify, VTo, VFrom, VId, VType} -> + {dialback_verify, VTo, VFrom, VId, VType} -> ?LOG_DEBUG(#{what => s2s_recv_verify, to => VTo, from => VFrom, message_id => VId, type => VType, myname => StateData#state.myname, server => StateData#state.server}), @@ -614,7 +614,7 @@ handle_sync_event(get_state_infos, _From, StateName, StateData) -> {tls_enabled, StateData#state.tls_enabled}, {tls_options, StateData#state.tls_options}, {authenticated, StateData#state.authenticated}, - {db_enabled, StateData#state.db_enabled}, + {dialback_enabled, StateData#state.dialback_enabled}, 
{try_auth, StateData#state.try_auth}, {myname, StateData#state.myname}, {server, StateData#state.server}, @@ -808,8 +808,8 @@ bounce_messages(Error) -> end. --spec send_db_request(state()) -> fsm_return(). -send_db_request(StateData) -> +-spec send_dialback_request(state()) -> fsm_return(). +send_dialback_request(StateData) -> IsRegistered = case StateData#state.is_registered of false -> ejabberd_s2s:try_register(StateData#state.from_to); @@ -829,18 +829,18 @@ send_db_request(StateData) -> StateData#state.remote_streamid), %% Initiating Server Sends Dialback Key %% https://xmpp.org/extensions/xep-0220.html#example-1 - send_element(StateData, db_result_key_xml(StateData#state.from_to, Key1)) + send_element(StateData, dialback_result_key_xml(StateData#state.from_to, Key1)) end, case StateData#state.verify of false -> ok; {_Pid, Key2, SID} -> - send_element(StateData, db_verify_key_xml(StateData#state.from_to, Key2, SID)) + send_element(StateData, dialback_verify_key_xml(StateData#state.from_to, Key2, SID)) end, {next_state, wait_for_validation, NewStateData, ?FSMTIMEOUT*6} catch Class:Reason:Stacktrace -> - ?LOG_ERROR(#{what => s2s_out_send_db_request_failed, + ?LOG_ERROR(#{what => s2s_out_send_dialback_request_failed, class => Class, reason => Reason, stacktrace => Stacktrace, myname => StateData#state.myname, server => StateData#state.server}), {stop, normal, NewStateData} @@ -850,17 +850,17 @@ send_db_request(StateData) -> %% Parse dialback verification result. %% Verification result is stored in the `type' attribute and could be `valid' or `invalid'. -spec parse_dialback_with_type(exml:element()) -> false - | {db_verify | db_result, To :: binary(), From :: binary(), Id :: binary(), Type :: binary()}. + | {dialback_verify | dialback_result, To :: binary(), From :: binary(), Id :: binary(), Type :: binary()}. 
parse_dialback_with_type(#xmlel{name = <<"db:result">>, attrs = Attrs}) -> %% Receiving Server Sends Valid or Invalid Verification Result to Initiating Server (Step 4) - {db_result, + {dialback_result, xml:get_attr_s(<<"to">>, Attrs), xml:get_attr_s(<<"from">>, Attrs), xml:get_attr_s(<<"id">>, Attrs), xml:get_attr_s(<<"type">>, Attrs)}; parse_dialback_with_type(#xmlel{name = <<"db:verify">>, attrs = Attrs}) -> %% Receiving Server is Informed by Authoritative Server that Key is Valid or Invalid (Step 3) - {db_verify, + {dialback_verify, xml:get_attr_s(<<"to">>, Attrs), xml:get_attr_s(<<"from">>, Attrs), xml:get_attr_s(<<"id">>, Attrs), @@ -869,16 +869,16 @@ parse_dialback_with_type(_) -> false. %% Initiating Server Sends Dialback Key (Step 1) --spec db_result_key_xml(ejabberd_s2s:fromto(), binary()) -> exml:element(). -db_result_key_xml({LocalServer, RemoteServer}, Key) -> +-spec dialback_result_key_xml(ejabberd_s2s:fromto(), binary()) -> exml:element(). +dialback_result_key_xml({LocalServer, RemoteServer}, Key) -> #xmlel{name = <<"db:result">>, attrs = [{<<"from">>, LocalServer}, {<<"to">>, RemoteServer}], children = [#xmlcdata{content = Key}]}. %% Receiving Server Sends Verification Request to Authoritative Server (Step 2) --spec db_verify_key_xml(ejabberd_s2s:fromto(), binary(), binary()) -> exml:element(). -db_verify_key_xml({LocalServer, RemoteServer}, Key, Id) -> +-spec dialback_verify_key_xml(ejabberd_s2s:fromto(), binary(), binary()) -> exml:element(). 
+dialback_verify_key_xml({LocalServer, RemoteServer}, Key, Id) -> #xmlel{name = <<"db:verify">>, attrs = [{<<"from">>, LocalServer}, {<<"to">>, RemoteServer}, @@ -1186,8 +1186,8 @@ handle_parsed_features({_, _, true, StateData = #state{tls = false}}) -> {next_state, reopen_socket, StateData#state{socket = undefined, use_v10 = false}, ?FSMTIMEOUT}; -handle_parsed_features({_, _, _, StateData = #state{db_enabled = true}}) -> - send_db_request(StateData); +handle_parsed_features({_, _, _, StateData = #state{dialback_enabled = true}}) -> + send_dialback_request(StateData); handle_parsed_features({_, _, _, StateData}) -> ?LOG_DEBUG(#{what => s2s_out_restarted, myname => StateData#state.myname, server => StateData#state.server}), From 5d5198f26fc234864b52f5bd5fe5530e6516c7c6 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 16:59:20 +0200 Subject: [PATCH 095/161] Use Dialback steps (mongoose_s2s_dialback module) Add next_state/2 helper function for ejabberd_s2s_out --- src/ejabberd_s2s.erl | 2 +- src/ejabberd_s2s_in.erl | 86 +++++--------------- src/ejabberd_s2s_out.erl | 121 +++++++++------------------ src/s2s/mongoose_s2s_dialback.erl | 131 ++++++++++++++++++++++++++++++ src/s2s/mongoose_s2s_mnesia.erl | 2 +- 5 files changed, 191 insertions(+), 151 deletions(-) create mode 100644 src/s2s/mongoose_s2s_dialback.erl diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index 6d23b47ea6c..61df97509b5 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -267,7 +267,7 @@ get_s2s_out_pids(FromTo) -> call_try_register(Pid, FromTo) -> mongoose_s2s_backend:try_register(Pid, FromTo). --spec call_try_register(Node :: node()) -> ok. +-spec call_node_cleanup(Node :: node()) -> ok. call_node_cleanup(Node) -> mongoose_s2s_backend:node_cleanup(Node). 
diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index 6c445e9873c..6952b8a19e3 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -299,11 +299,11 @@ tls_options_with_certfile(#state{host_type = HostType, tls_options = TLSOptions} stream_established({xmlstreamelement, El}, StateData) -> cancel_timer(StateData#state.timer), Timer = erlang:start_timer(mongoose_s2s_lib:timeout(), self(), []), - case parse_dialback_with_key(El) of + case mongoose_s2s_dialback:parse_key(El) of %% Incoming dialback key, we have to verify it using ejabberd_s2s_out before %% accepting any incoming stanzas - %% (we have to receive an event `{valid, FromTo}' from `ejabberd_s2s_out' process). - {dialback_result, FromTo, Id, Key} -> + %% (we have to receive the `validity_from_s2s_out' event first). + {step_1, FromTo, Id, Key} -> ?LOG_DEBUG(#{what => s2s_in_get_key, from_to => FromTo, message_id => Id, key => Key}), %% Checks if the from domain is allowed and if the to @@ -332,32 +332,19 @@ stream_established({xmlstreamelement, El}, StateData) -> end; %% Incoming dialback verification request %% We have to check it using secrets and reply if it is valid or not - {dialback_verify, FromTo, Id, Key} -> + {step_2, FromTo, Id, Key} -> ?LOG_DEBUG(#{what => s2s_in_verify_key, from_to => FromTo, message_id => Id, key => Key}), - Type = case ejabberd_s2s:key(StateData#state.host_type, FromTo, Id) of - Key -> <<"valid">>; - _ -> <<"invalid">> - end, - send_element(StateData, dialback_verify_type_xml(FromTo, Id, Type)), + IsValid = Key =:= ejabberd_s2s:key(StateData#state.host_type, FromTo, Id), + send_element(StateData, mongoose_s2s_dialback:step_3(FromTo, Id, IsValid)), {next_state, stream_established, StateData#state{timer = Timer}}; false -> Res = parse_and_route_incoming_stanza(El, StateData), handle_routing_result(Res, El, StateData), {next_state, stream_established, StateData#state{timer = Timer}} end; -%% An event from ejabberd_s2s_out -stream_established({valid, 
FromTo}, StateData) -> - send_element(StateData, dialback_result_type_xml(FromTo, <<"valid">>)), - Cons = maps:put(FromTo, established, StateData#state.connections), - NSD = StateData#state{connections = Cons}, - {next_state, stream_established, NSD}; -%% An event from ejabberd_s2s_out -stream_established({invalid, FromTo}, StateData) -> - send_element(StateData, dialback_result_type_xml(FromTo, <<"invalid">>)), - Cons = maps:remove(FromTo, StateData#state.connections), - NSD = StateData#state{connections = Cons}, - {next_state, stream_established, NSD}; +stream_established({validity_from_s2s_out, IsValid, FromTo}, StateData) -> + handle_validity_from_s2s_out(IsValid, FromTo, StateData); stream_established({xmlstreamend, _Name}, StateData) -> send_text(StateData, ?STREAM_TRAILER), {stop, normal, StateData}; @@ -371,6 +358,17 @@ stream_established(timeout, StateData) -> stream_established(closed, StateData) -> {stop, normal, StateData}. +-spec handle_validity_from_s2s_out(boolean(), ejabberd_s2s:fromto(), #state{}) -> + {next_state, stream_established, #state{}}. +handle_validity_from_s2s_out(IsValid, FromTo, StateData) -> + send_element(StateData, mongoose_s2s_dialback:step_4(FromTo, IsValid)), + {next_state, stream_established, update_connections(IsValid, FromTo, StateData)}. + +update_connections(true, FromTo, StateData = #state{connections = Cons}) -> + StateData#state{connections = maps:put(FromTo, established, Cons)}; +update_connections(false, FromTo, StateData = #state{connections = Cons}) -> + StateData#state{connections = maps:remove(FromTo, Cons)}. + handle_routing_result(ok, _El, _StateData) -> ok; handle_routing_result({error, Reason}, El, _StateData) -> @@ -560,43 +558,6 @@ cancel_timer(Timer) -> ok end. 
-%% XEP-0185: Dialback Key Generation and Validation -%% DB means dial-back -%% Receiving Server is Informed by Authoritative Server that Key is Valid or Invalid (Step 3) --spec dialback_verify_type_xml(ejabberd_s2s:fromto(), binary(), binary()) -> exml:element(). -dialback_verify_type_xml({LocalServer, RemoteServer}, Id, Type) -> - #xmlel{name = <<"db:verify">>, - attrs = [{<<"from">>, LocalServer}, - {<<"to">>, RemoteServer}, - {<<"id">>, Id}, - {<<"type">>, Type}]}. - --spec dialback_result_type_xml(ejabberd_s2s:fromto(), binary()) -> exml:element(). -dialback_result_type_xml({LocalServer, RemoteServer}, Type) -> - %% Receiving Server Sends Valid or Invalid Verification Result to Initiating Server (Step 4) - #xmlel{name = <<"db:result">>, - attrs = [{<<"from">>, LocalServer}, - {<<"to">>, RemoteServer}, - {<<"type">>, Type}]}. - --spec parse_dialback_with_key(exml:element()) -> false - | {dialback_result | dialback_verify, - FromTo :: ejabberd_s2s:fromto(), Id :: binary(), Key :: binary()}. -parse_dialback_with_key(El = #xmlel{name = <<"db:result">>}) -> - %% Initiating Server Sends Dialback Key (Step 1) - parsed_dialback_with_key(dialback_result, El); -parse_dialback_with_key(El = #xmlel{name = <<"db:verify">>}) -> - %% Receiving Server Sends Verification Request to Authoritative Server (Step 2) - parsed_dialback_with_key(dialback_verify, El); -parse_dialback_with_key(_) -> - false. - -parsed_dialback_with_key(Type, El) -> - FromTo = parse_from_to(El), - Id = exml_query:attr(El, <<"id">>, <<>>), - Key = exml_query:cdata(El), - {Type, FromTo, Id, Key}. - -spec match_domain(binary(), binary()) -> boolean(). match_domain(Domain, Domain) -> true; @@ -707,12 +668,3 @@ get_tls_xmlel(#state{tls_enabled = false, tls_required = true}) -> is_local_host_known({LLocalServer, _}) -> mongoose_router:is_registered_route(LLocalServer) orelse mongoose_component:has_component(LLocalServer). - --spec parse_from_to(exml:element()) -> ejabberd_s2s:fromto(). 
-parse_from_to(El) -> - RemoteJid = jid:from_binary(exml_query:attr(El, <<"from">>, <<>>)), - LocalJid = jid:from_binary(exml_query:attr(El, <<"to">>, <<>>)), - #jid{luser = <<>>, lresource = <<>>, lserver = LRemoteServer} = RemoteJid, - #jid{luser = <<>>, lresource = <<>>, lserver = LLocalServer} = LocalJid, - %% We use fromto() as seen by ejabberd_s2s_out and ejabberd_s2s - {LLocalServer, LRemoteServer}. diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index 25de2bebb37..7cd2d132082 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -308,7 +308,7 @@ wait_for_stream({xmlstreamstart, _Name, Attrs}, StateData0) -> text => <<"Closing s2s connection: (invalid namespace)">>, namespace_provided => NSProvided, namespace_expected => <<"jabber:server">>, - xmlnsdialback_provided => DB, + xmlns_dialback_provided => DB, all_attributes => Attrs, myname => StateData#state.myname, server => StateData#state.server}), {stop, normal, StateData} @@ -327,14 +327,25 @@ wait_for_stream(closed, StateData) -> -spec wait_for_validation(ejabberd:xml_stream_item(), state()) -> fsm_return(). wait_for_validation({xmlstreamelement, El}, StateData) -> - case parse_dialback_with_type(El) of - {dialback_result, To, From, Id, Type} -> + case mongoose_s2s_dialback:parse_validity(El) of + {step_3, FromTo, Id, IsValid} -> + ?LOG_DEBUG(#{what => s2s_receive_verify, + from_to => FromTo, message_id => Id, is_valid => IsValid}), + case StateData#state.verify of + false -> + %% TODO: Should'nt we close the connection here ? 
+ next_state(wait_for_validation, StateData); + {Pid, _Key, _SID} -> + send_event_to_s2s_in(IsValid, Pid, StateData), + next_state(wait_for_validation, StateData) + end; + {step_4, FromTo, Id, IsValid} -> ?LOG_DEBUG(#{what => s2s_receive_result, - from => From, to => To, message_id => Id, type => Type}), - case {Type, StateData#state.tls_enabled, StateData#state.tls_required} of - {<<"valid">>, Enabled, Required} when (Enabled==true) or (Required==false) -> - %% Initiating Server Receives Valid Verification Result from Receiving Server (Step 4) - %% https://xmpp.org/extensions/xep-0220.html#example-2 + from_to => FromTo, message_id => Id, is_valid => IsValid}), + #state{tls_enabled = Enabled, tls_required = Required} = StateData, + case IsValid of + true when (Enabled==true) or (Required==false) -> + %% Initiating server receives valid verification result from receiving server (Step 4) send_queue(StateData, StateData#state.queue), ?LOG_INFO(#{what => s2s_out_connected, text => <<"New outgoing s2s connection established">>, @@ -342,28 +353,14 @@ wait_for_validation({xmlstreamelement, El}, StateData) -> myname => StateData#state.myname, server => StateData#state.server}), {next_state, stream_established, StateData#state{queue = queue:new()}}; - {<<"valid">>, Enabled, Required} when (Enabled==false) and (Required==true) -> + true when (Enabled==false) and (Required==true) -> %% TODO: bounce packets ?CLOSE_GENERIC(wait_for_validation, tls_required_but_unavailable, El, StateData); _ -> %% TODO: bounce packets ?CLOSE_GENERIC(wait_for_validation, invalid_dialback_key, El, StateData) end; - {dialback_verify, To, From, Id, Type} -> - ?LOG_DEBUG(#{what => s2s_receive_verify, - from => From, to => To, message_id => Id, type => Type}), - case StateData#state.verify of - false -> - %% TODO: Should'nt we close the connection here ? 
- {next_state, wait_for_validation, StateData, - get_timeout_interval(NextState)}; - {Pid, _Key, _SID} -> - send_event_to_s2s_in(Type, Pid, StateData), - {next_state, wait_for_validation, StateData, - get_timeout_interval(NextState)} - - end; - _ -> + false -> {next_state, wait_for_validation, StateData, ?FSMTIMEOUT*3} end; wait_for_validation({xmlstreamend, _Name}, StateData) -> @@ -540,14 +537,14 @@ wait_before_retry(_Event, StateData) -> stream_established({xmlstreamelement, El}, StateData) -> ?LOG_DEBUG(#{what => s2s_out_stream_established, exml_packet => El, myname => StateData#state.myname, server => StateData#state.server}), - case parse_dialback_with_type(El) of - {dialback_verify, VTo, VFrom, VId, VType} -> + case mongoose_s2s_dialback:parse_validity(El) of + {step_3, FromTo, VId, IsValid} -> ?LOG_DEBUG(#{what => s2s_recv_verify, - to => VTo, from => VFrom, message_id => VId, type => VType, + from_to => FromTo, message_id => VId, is_valid => IsValid, myname => StateData#state.myname, server => StateData#state.server}), case StateData#state.verify of {VPid, _VKey, _SID} -> - send_event_to_s2s_in(VType, VPid, StateData); + send_event_to_s2s_in(IsValid, VPid, StateData); _ -> ok end; @@ -587,7 +584,7 @@ stream_established(closed, StateData) -> %% {stop, Reason, NewStateData} %%---------------------------------------------------------------------- handle_event(_Event, StateName, StateData) -> - {next_state, StateName, StateData, get_timeout_interval(StateName)}. + next_state(StateName, StateData). 
%%---------------------------------------------------------------------- %% Func: handle_sync_event/4 @@ -661,8 +658,7 @@ handle_info({send_element, Acc, El}, StateName, StateData) -> {next_state, StateName, StateData}; _ -> Q = queue:in({Acc, El}, StateData#state.queue), - {next_state, StateName, StateData#state{queue = Q}, - get_timeout_interval(StateName)} + next_state(StateName, StateData#state{queue = Q}) end; handle_info({timeout, Timer, _}, wait_before_retry, #state{timer = Timer} = StateData) -> @@ -677,9 +673,9 @@ handle_info({timeout, Timer, _}, StateName, handle_info(terminate_if_waiting_before_retry, wait_before_retry, StateData) -> ?CLOSE_GENERIC(wait_before_retry, terminate_if_waiting_before_retry, StateData); handle_info(terminate_if_waiting_before_retry, StateName, StateData) -> - {next_state, StateName, StateData, get_timeout_interval(StateName)}; + next_state(StateName, StateData); handle_info(_, StateName, StateData) -> - {next_state, StateName, StateData, get_timeout_interval(StateName)}. + next_state(StateName, StateData). 
%%---------------------------------------------------------------------- %% Func: terminate/3 @@ -827,15 +823,15 @@ send_dialback_request(StateData) -> StateData#state.host_type, StateData#state.from_to, StateData#state.remote_streamid), - %% Initiating Server Sends Dialback Key - %% https://xmpp.org/extensions/xep-0220.html#example-1 - send_element(StateData, dialback_result_key_xml(StateData#state.from_to, Key1)) + %% Initiating server sends dialback key + send_element(StateData, mongoose_s2s_dialback:step_1(StateData#state.from_to, Key1)) end, case StateData#state.verify of false -> ok; {_Pid, Key2, SID} -> - send_element(StateData, dialback_verify_key_xml(StateData#state.from_to, Key2, SID)) + %% Receiving server sends verification request + send_element(StateData, mongoose_s2s_dialback:step_2(StateData#state.from_to, Key2, SID)) end, {next_state, wait_for_validation, NewStateData, ?FSMTIMEOUT*6} catch @@ -846,46 +842,6 @@ send_dialback_request(StateData) -> {stop, normal, NewStateData} end. - -%% Parse dialback verification result. -%% Verification result is stored in the `type' attribute and could be `valid' or `invalid'. --spec parse_dialback_with_type(exml:element()) -> false - | {dialback_verify | dialback_result, To :: binary(), From :: binary(), Id :: binary(), Type :: binary()}. 
-parse_dialback_with_type(#xmlel{name = <<"db:result">>, attrs = Attrs}) -> - %% Receiving Server Sends Valid or Invalid Verification Result to Initiating Server (Step 4) - {dialback_result, - xml:get_attr_s(<<"to">>, Attrs), - xml:get_attr_s(<<"from">>, Attrs), - xml:get_attr_s(<<"id">>, Attrs), - xml:get_attr_s(<<"type">>, Attrs)}; -parse_dialback_with_type(#xmlel{name = <<"db:verify">>, attrs = Attrs}) -> - %% Receiving Server is Informed by Authoritative Server that Key is Valid or Invalid (Step 3) - {dialback_verify, - xml:get_attr_s(<<"to">>, Attrs), - xml:get_attr_s(<<"from">>, Attrs), - xml:get_attr_s(<<"id">>, Attrs), - xml:get_attr_s(<<"type">>, Attrs)}; -parse_dialback_with_type(_) -> - false. - -%% Initiating Server Sends Dialback Key (Step 1) --spec dialback_result_key_xml(ejabberd_s2s:fromto(), binary()) -> exml:element(). -dialback_result_key_xml({LocalServer, RemoteServer}, Key) -> - #xmlel{name = <<"db:result">>, - attrs = [{<<"from">>, LocalServer}, - {<<"to">>, RemoteServer}], - children = [#xmlcdata{content = Key}]}. - -%% Receiving Server Sends Verification Request to Authoritative Server (Step 2) --spec dialback_verify_key_xml(ejabberd_s2s:fromto(), binary(), binary()) -> exml:element(). -dialback_verify_key_xml({LocalServer, RemoteServer}, Key, Id) -> - #xmlel{name = <<"db:verify">>, - attrs = [{<<"from">>, LocalServer}, - {<<"to">>, RemoteServer}, - {<<"id">>, Id}], - children = [#xmlcdata{content = Key}]}. - - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% SRV support @@ -1015,6 +971,10 @@ log_s2s_out(_, Myname, Server, Tls) -> text => <<"Trying to open s2s connection">>, myname => Myname, server => Server, tls => Tls}). +next_state(StateName, StateData) -> + {next_state, StateName, StateData, + get_timeout_interval(StateName)}. + %% @doc Calculate timeout depending on which state we are in: %% Can return integer > 0 | infinity -spec get_timeout_interval(statename()) -> 'infinity' | non_neg_integer(). 
@@ -1110,11 +1070,8 @@ get_predefined_port(HostType, _Addr) -> outgoing_s2s_port(HostType). addr_type(Addr) when tuple_size(Addr) =:= 4 -> inet; addr_type(Addr) when tuple_size(Addr) =:= 8 -> inet6. -send_event_to_s2s_in(<<"valid">>, Pid, StateData) -> - Event = {valid, StateData#state.from_to}, - p1_fsm:send_event(Pid, Event); -send_event_to_s2s_in(_, Pid, StateData) -> - Event = {invalid, StateData#state.from_to}, +send_event_to_s2s_in(IsValid, Pid, StateData) when is_boolean(IsValid) -> + Event = {validity_from_s2s_out, IsValid, StateData#state.from_to}, p1_fsm:send_event(Pid, Event). get_acc_with_new_sext(?NS_SASL, Els1, {_SEXT, STLS, STLSReq}) -> diff --git a/src/s2s/mongoose_s2s_dialback.erl b/src/s2s/mongoose_s2s_dialback.erl new file mode 100644 index 00000000000..ee3abf4e0d0 --- /dev/null +++ b/src/s2s/mongoose_s2s_dialback.erl @@ -0,0 +1,131 @@ +%% Steps for S2S Dialback. +%% Diagram from https://xmpp.org/extensions/xep-0220.html#intro-howitworks +%% +%% Initiating Receiving +%% Server Server +%% ----------- --------- +%% | | +%% | [if necessary, | +%% | perform DNS lookup | +%% | on Target Domain, | +%% | open TCP connection, | +%% | and establish stream] | +%% | -----------------------> | +%% | | Authoritative +%% | send dialback key | Server +%% | -------(STEP 1)--------> | ------------- +%% | | | +%% | | [if necessary, | +%% | | perform DNS lookup, | +%% | | on Sender Domain, | +%% | | open TCP connection, | +%% | | and establish stream] | +%% | | -----------------------> | +%% | | | +%% | | send verify request | +%% | | -------(STEP 2)--------> | +%% | | | +%% | | send verify response | +%% | | <------(STEP 3)--------- | +%% | | +%% | report dialback result | +%% | <-------(STEP 4)-------- | +%% | | + +%% Because db:result and db:verify tags are confusing, use step numbers. +%% (db:result should've been named db:key). + +-module(mongoose_s2s_dialback). +-export([step_1/2, + step_2/3, + step_3/3, + step_4/2]). 
+ +-export([parse_key/1, + parse_validity/1]). + +-include("mongoose.hrl"). +-include("jlib.hrl"). + +%% Initiating server sends dialback key +%% https://xmpp.org/extensions/xep-0220.html#example-1 +-spec step_1(ejabberd_s2s:fromto(), binary()) -> exml:element(). +step_1({LocalServer, RemoteServer}, Key) -> + #xmlel{name = <<"db:result">>, + attrs = [{<<"from">>, LocalServer}, + {<<"to">>, RemoteServer}], + children = [#xmlcdata{content = Key}]}. + +%% Receiving server sends verification request to authoritative server (step 2) +-spec step_2(ejabberd_s2s:fromto(), binary(), binary()) -> exml:element(). +step_2({LocalServer, RemoteServer}, Key, Id) -> + #xmlel{name = <<"db:verify">>, + attrs = [{<<"from">>, LocalServer}, + {<<"to">>, RemoteServer}, + {<<"id">>, Id}], + children = [#xmlcdata{content = Key}]}. + +%% Receiving server is informed by authoritative server that key is valid or invalid (step 3) +-spec step_3(ejabberd_s2s:fromto(), binary(), boolean()) -> exml:element(). +step_3({LocalServer, RemoteServer}, Id, IsValid) -> + #xmlel{name = <<"db:verify">>, + attrs = [{<<"from">>, LocalServer}, + {<<"to">>, RemoteServer}, + {<<"id">>, Id}, + {<<"type">>, is_valid_to_type(IsValid)}]}. + +%% Receiving server sends valid or invalid verification result to initiating server (step 4) +-spec step_4(ejabberd_s2s:fromto(), boolean()) -> exml:element(). +step_4({LocalServer, RemoteServer}, IsValid) -> + #xmlel{name = <<"db:result">>, + attrs = [{<<"from">>, LocalServer}, + {<<"to">>, RemoteServer}, + {<<"type">>, is_valid_to_type(IsValid)}]}. + +is_valid_to_type(true) -> <<"valid">>; +is_valid_to_type(false) -> <<"invalid">>. + +-spec parse_key(exml:element()) -> false + | {step_1 | step_2, FromTo :: ejabberd_s2s:fromto(), Id :: binary(), Key :: binary()}. 
+parse_key(El = #xmlel{name = <<"db:result">>}) -> + %% Initiating Server Sends Dialback Key (Step 1) + parse_key(step_1, El); +parse_key(El = #xmlel{name = <<"db:verify">>}) -> + %% Receiving Server Sends Verification Request to Authoritative Server (Step 2) + parse_key(step_2, El); +parse_key(_) -> + false. + +parse_key(Step, El) -> + FromTo = parse_from_to(El), + Id = exml_query:attr(El, <<"id">>, <<>>), + Key = exml_query:cdata(El), + {Step, FromTo, Id, Key}. + +%% Parse dialback verification result. +%% Verification result is stored in the `type' attribute and could be `valid' or `invalid'. +-spec parse_validity(exml:element()) -> false + | {step_3 | step_4, FromTo :: ejabberd_s2s:fromto(), Id :: binary(), IsValid :: boolean()}. +parse_validity(El = #xmlel{name = <<"db:verify">>}) -> + %% Receiving Server is Informed by Authoritative Server that Key is Valid or Invalid (Step 3) + parse_validity(step_3, El); +parse_validity(El = #xmlel{name = <<"db:result">>}) -> + %% Receiving Server Sends Valid or Invalid Verification Result to Initiating Server (Step 4) + parse_validity(step_4, El); +parse_validity(_) -> + false. + +parse_validity(Step, El) -> + FromTo = parse_from_to(El), + Id = exml_query:attr(El, <<"id">>, <<>>), + IsValid = exml_query:attr(El, <<"type">>) =:= <<"valid">>, + {Step, FromTo, Id, IsValid}. + +-spec parse_from_to(exml:element()) -> ejabberd_s2s:fromto(). +parse_from_to(El) -> + RemoteJid = jid:from_binary(exml_query:attr(El, <<"from">>, <<>>)), + LocalJid = jid:from_binary(exml_query:attr(El, <<"to">>, <<>>)), + #jid{luser = <<>>, lresource = <<>>, lserver = LRemoteServer} = RemoteJid, + #jid{luser = <<>>, lresource = <<>>, lserver = LLocalServer} = LocalJid, + %% We use fromto() as seen by ejabberd_s2s_out and ejabberd_s2s + {LLocalServer, LRemoteServer}. 
diff --git a/src/s2s/mongoose_s2s_mnesia.erl b/src/s2s/mongoose_s2s_mnesia.erl index 1d804815e9a..e09ec15acc0 100644 --- a/src/s2s/mongoose_s2s_mnesia.erl +++ b/src/s2s/mongoose_s2s_mnesia.erl @@ -17,7 +17,7 @@ -record(s2s_shared, { host_type :: mongooseim:host_type(), - secret :: mongooseim:base16_secret() + secret :: ejabberd_s2s:base16_secret() }). -include("mongoose_logger.hrl"). From 0319a0df577665348dad25c8a4407fe594dc3607 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 17:33:45 +0200 Subject: [PATCH 096/161] Make ejabberd_s2s_in:send_validity_from_s2s_out function --- src/ejabberd_s2s_in.erl | 6 ++++++ src/ejabberd_s2s_out.erl | 16 +++++++--------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index 6952b8a19e3..be0a806295a 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -34,6 +34,7 @@ %% External exports -export([start/2, start_link/2, + send_validity_from_s2s_out/3, match_domain/2]). %% gen_fsm callbacks @@ -111,6 +112,11 @@ start_link(Socket, Opts) -> start_listener(Opts) -> mongoose_tcp_listener:start_listener(Opts). +-spec send_validity_from_s2s_out(pid(), boolean(), ejabberd_s2s:fromto()) -> ok. +send_validity_from_s2s_out(Pid, IsValid, FromTo) when is_boolean(IsValid) -> + Event = {validity_from_s2s_out, IsValid, FromTo}, + p1_fsm:send_event(Pid, Event). + %%%---------------------------------------------------------------------- %%% Callback functions from gen_fsm %%%---------------------------------------------------------------------- diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index 7cd2d132082..c284211cdc2 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -326,7 +326,7 @@ wait_for_stream(closed, StateData) -> -spec wait_for_validation(ejabberd:xml_stream_item(), state()) -> fsm_return(). 
-wait_for_validation({xmlstreamelement, El}, StateData) -> +wait_for_validation({xmlstreamelement, El}, StateData = #state{from_to = FromTo}) -> case mongoose_s2s_dialback:parse_validity(El) of {step_3, FromTo, Id, IsValid} -> ?LOG_DEBUG(#{what => s2s_receive_verify, @@ -336,7 +336,7 @@ wait_for_validation({xmlstreamelement, El}, StateData) -> %% TODO: Should'nt we close the connection here ? next_state(wait_for_validation, StateData); {Pid, _Key, _SID} -> - send_event_to_s2s_in(IsValid, Pid, StateData), + ejabberd_s2s_in:send_validity_from_s2s_out(Pid, IsValid, FromTo), next_state(wait_for_validation, StateData) end; {step_4, FromTo, Id, IsValid} -> @@ -534,7 +534,7 @@ wait_before_retry(_Event, StateData) -> {next_state, wait_before_retry, StateData, ?FSMTIMEOUT}. -spec stream_established(ejabberd:xml_stream_item(), state()) -> fsm_return(). -stream_established({xmlstreamelement, El}, StateData) -> +stream_established({xmlstreamelement, El}, StateData = #state{from_to = FromTo}) -> ?LOG_DEBUG(#{what => s2s_out_stream_established, exml_packet => El, myname => StateData#state.myname, server => StateData#state.server}), case mongoose_s2s_dialback:parse_validity(El) of @@ -544,11 +544,13 @@ stream_established({xmlstreamelement, El}, StateData) -> myname => StateData#state.myname, server => StateData#state.server}), case StateData#state.verify of {VPid, _VKey, _SID} -> - send_event_to_s2s_in(IsValid, VPid, StateData); + ejabberd_s2s_in:send_validity_from_s2s_out(VPid, IsValid, FromTo); _ -> ok end; - _ -> + {step_4, _FromTo, _Id, _IsValid} -> + ok; + false -> ok end, {next_state, stream_established, StateData}; @@ -1070,10 +1072,6 @@ get_predefined_port(HostType, _Addr) -> outgoing_s2s_port(HostType). addr_type(Addr) when tuple_size(Addr) =:= 4 -> inet; addr_type(Addr) when tuple_size(Addr) =:= 8 -> inet6. 
-send_event_to_s2s_in(IsValid, Pid, StateData) when is_boolean(IsValid) -> - Event = {validity_from_s2s_out, IsValid, StateData#state.from_to}, - p1_fsm:send_event(Pid, Event). - get_acc_with_new_sext(?NS_SASL, Els1, {_SEXT, STLS, STLSReq}) -> NewSEXT = lists:any( From c71366347814037b4c49a174ad9cc624aad15ad7 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 20:45:34 +0200 Subject: [PATCH 097/161] Properly check if the local host exist Fixes muc:register_over_s2s --- src/ejabberd_s2s_in.erl | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index be0a806295a..790078c6545 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -309,7 +309,7 @@ stream_established({xmlstreamelement, El}, StateData) -> %% Incoming dialback key, we have to verify it using ejabberd_s2s_out before %% accepting any incoming stanzas %% (we have to receive the `validity_from_s2s_out' event first). - {step_1, FromTo, Id, Key} -> + {step_1, FromTo, Id, Key} = Parsed -> ?LOG_DEBUG(#{what => s2s_in_get_key, from_to => FromTo, message_id => Id, key => Key}), %% Checks if the from domain is allowed and if the to @@ -329,11 +329,12 @@ stream_established({xmlstreamelement, El}, StateData) -> StateData#state{connections = Conns, timer = Timer}}; {_, false} -> send_element(StateData, mongoose_xmpp_errors:host_unknown()), - ?LOG_WARNING(#{what => s2s_in_key_from_uknown_host}), + ?LOG_WARNING(#{what => s2s_in_key_from_uknown_host, element => El, + parsed => Parsed, from_to => FromTo}), {stop, normal, StateData}; {false, _} -> send_element(StateData, mongoose_xmpp_errors:invalid_from()), - ?LOG_WARNING(#{what => s2s_in_key_with_invalid_from}), + ?LOG_WARNING(#{what => s2s_in_key_with_invalid_from, element => El}), {stop, normal, StateData} end; %% Incoming dialback verification request @@ -673,4 +674,13 @@ get_tls_xmlel(#state{tls_enabled = false, tls_required = true}) -> 
is_local_host_known({LLocalServer, _}) -> mongoose_router:is_registered_route(LLocalServer) - orelse mongoose_component:has_component(LLocalServer). + orelse mongoose_component:has_component(LLocalServer) + orelse is_known_domain(LLocalServer). + +is_known_domain(Domain) -> + case mongoose_domain_api:get_host_type(Domain) of + {ok, _HostType} -> + true; + _ -> + false + end. From a3c4aad7981f9c91da260b42f2768d7413c5da7f Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 21:25:27 +0200 Subject: [PATCH 098/161] Move make_key logic into mongoose_s2s_dialback More types --- src/ejabberd_s2s.erl | 12 ++++++------ src/s2s/mongoose_s2s_dialback.erl | 25 ++++++++++++++++++++----- 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index 61df97509b5..4ad566acf92 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -64,8 +64,10 @@ -record(state, {}). -type base16_secret() :: binary(). +-type stream_id() :: binary(). +-type s2s_dialback_key() :: binary(). --export_type([fromto/0, s2s_pids/0, base16_secret/0]). +-export_type([fromto/0, s2s_pids/0, base16_secret/0, stream_id/0, s2s_dialback_key/0]). %% API functions @@ -98,12 +100,10 @@ try_register(FromTo) -> end, IsRegistered. --spec key(mongooseim:host_type(), fromto(), binary()) -> binary(). -key(HostType, {From, To}, StreamID) -> +-spec key(mongooseim:host_type(), fromto(), stream_id()) -> s2s_dialback_key(). +key(HostType, FromTo, StreamID) -> {ok, Secret} = get_shared_secret(HostType), - SecretHashed = base16:encode(crypto:hash(sha256, Secret)), - HMac = crypto:mac(hmac, sha256, SecretHashed, [From, " ", To, " ", StreamID]), - base16:encode(HMac). + mongoose_s2s_dialback:make_key(FromTo, StreamID, Secret). 
%% Hooks callbacks diff --git a/src/s2s/mongoose_s2s_dialback.erl b/src/s2s/mongoose_s2s_dialback.erl index ee3abf4e0d0..6dc79ba91f0 100644 --- a/src/s2s/mongoose_s2s_dialback.erl +++ b/src/s2s/mongoose_s2s_dialback.erl @@ -44,12 +44,14 @@ -export([parse_key/1, parse_validity/1]). +-export([make_key/2]). + -include("mongoose.hrl"). -include("jlib.hrl"). %% Initiating server sends dialback key %% https://xmpp.org/extensions/xep-0220.html#example-1 --spec step_1(ejabberd_s2s:fromto(), binary()) -> exml:element(). +-spec step_1(ejabberd_s2s:fromto(), ejabberd_s2s:s2s_dialback_key()) -> exml:element(). step_1({LocalServer, RemoteServer}, Key) -> #xmlel{name = <<"db:result">>, attrs = [{<<"from">>, LocalServer}, @@ -57,7 +59,7 @@ step_1({LocalServer, RemoteServer}, Key) -> children = [#xmlcdata{content = Key}]}. %% Receiving server sends verification request to authoritative server (step 2) --spec step_2(ejabberd_s2s:fromto(), binary(), binary()) -> exml:element(). +-spec step_2(ejabberd_s2s:fromto(), ejabberd_s2s:s2s_dialback_key(), ejabberd_s2s:stream_id()) -> exml:element(). step_2({LocalServer, RemoteServer}, Key, Id) -> #xmlel{name = <<"db:verify">>, attrs = [{<<"from">>, LocalServer}, @@ -66,7 +68,7 @@ step_2({LocalServer, RemoteServer}, Key, Id) -> children = [#xmlcdata{content = Key}]}. %% Receiving server is informed by authoritative server that key is valid or invalid (step 3) --spec step_3(ejabberd_s2s:fromto(), binary(), boolean()) -> exml:element(). +-spec step_3(ejabberd_s2s:fromto(), ejabberd_s2s:stream_id(), boolean()) -> exml:element(). step_3({LocalServer, RemoteServer}, Id, IsValid) -> #xmlel{name = <<"db:verify">>, attrs = [{<<"from">>, LocalServer}, @@ -86,7 +88,10 @@ is_valid_to_type(true) -> <<"valid">>; is_valid_to_type(false) -> <<"invalid">>. -spec parse_key(exml:element()) -> false - | {step_1 | step_2, FromTo :: ejabberd_s2s:fromto(), Id :: binary(), Key :: binary()}. 
+ | {Step :: step_1 | step_2, + FromTo :: ejabberd_s2s:fromto(), + Id :: ejabberd_s2s:stream_id(), + Key :: ejabberd_s2s:s2s_dialback_key()}. parse_key(El = #xmlel{name = <<"db:result">>}) -> %% Initiating Server Sends Dialback Key (Step 1) parse_key(step_1, El); @@ -105,7 +110,10 @@ parse_key(Step, El) -> %% Parse dialback verification result. %% Verification result is stored in the `type' attribute and could be `valid' or `invalid'. -spec parse_validity(exml:element()) -> false - | {step_3 | step_4, FromTo :: ejabberd_s2s:fromto(), Id :: binary(), IsValid :: boolean()}. + | {Step :: step_3 | step_4, + FromTo :: ejabberd_s2s:fromto(), + Id :: ejabberd_s2s:stream_id(), + IsValid :: boolean()}. parse_validity(El = #xmlel{name = <<"db:verify">>}) -> %% Receiving Server is Informed by Authoritative Server that Key is Valid or Invalid (Step 3) parse_validity(step_3, El); @@ -129,3 +137,10 @@ parse_from_to(El) -> #jid{luser = <<>>, lresource = <<>>, lserver = LLocalServer} = LocalJid, %% We use fromto() as seen by ejabberd_s2s_out and ejabberd_s2s {LLocalServer, LRemoteServer}. + +-spec make_key(fromto(), ejabberd_s2s:stream_id(), ejabberd_s2s:base16_secret()) -> + ejabberd_s2s:s2s_dialback_key(). +make_key({From, To}, StreamID, Secret) -> + SecretHashed = base16:encode(crypto:hash(sha256, Secret)), + HMac = crypto:mac(hmac, sha256, SecretHashed, [From, " ", To, " ", StreamID]), + base16:encode(HMac). 
From 312928a946a2c1775684ff15b183e441a51a845c Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 21:33:59 +0200 Subject: [PATCH 099/161] Use StreamID name in dialback functions --- src/ejabberd_s2s_in.erl | 12 ++++++------ src/ejabberd_s2s_out.erl | 14 +++++++------- src/s2s/mongoose_s2s_dialback.erl | 24 ++++++++++++------------ 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index 790078c6545..513f1fa9ac4 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -309,9 +309,9 @@ stream_established({xmlstreamelement, El}, StateData) -> %% Incoming dialback key, we have to verify it using ejabberd_s2s_out before %% accepting any incoming stanzas %% (we have to receive the `validity_from_s2s_out' event first). - {step_1, FromTo, Id, Key} = Parsed -> + {step_1, FromTo, StreamID, Key} = Parsed -> ?LOG_DEBUG(#{what => s2s_in_get_key, - from_to => FromTo, message_id => Id, key => Key}), + from_to => FromTo, stream_id => StreamID, key => Key}), %% Checks if the from domain is allowed and if the to %% domain is handled by this server: case {mongoose_s2s_lib:allow_host(FromTo), is_local_host_known(FromTo)} of @@ -339,11 +339,11 @@ stream_established({xmlstreamelement, El}, StateData) -> end; %% Incoming dialback verification request %% We have to check it using secrets and reply if it is valid or not - {step_2, FromTo, Id, Key} -> + {step_2, FromTo, StreamID, Key} -> ?LOG_DEBUG(#{what => s2s_in_verify_key, - from_to => FromTo, message_id => Id, key => Key}), - IsValid = Key =:= ejabberd_s2s:key(StateData#state.host_type, FromTo, Id), - send_element(StateData, mongoose_s2s_dialback:step_3(FromTo, Id, IsValid)), + from_to => FromTo, stream_id => StreamID, key => Key}), + IsValid = Key =:= ejabberd_s2s:key(StateData#state.host_type, FromTo, StreamID), + send_element(StateData, mongoose_s2s_dialback:step_3(FromTo, StreamID, IsValid)), {next_state, stream_established, 
StateData#state{timer = Timer}}; false -> Res = parse_and_route_incoming_stanza(El, StateData), diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index c284211cdc2..e86ea7f88fe 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -328,9 +328,9 @@ wait_for_stream(closed, StateData) -> -spec wait_for_validation(ejabberd:xml_stream_item(), state()) -> fsm_return(). wait_for_validation({xmlstreamelement, El}, StateData = #state{from_to = FromTo}) -> case mongoose_s2s_dialback:parse_validity(El) of - {step_3, FromTo, Id, IsValid} -> + {step_3, FromTo, StreamID, IsValid} -> ?LOG_DEBUG(#{what => s2s_receive_verify, - from_to => FromTo, message_id => Id, is_valid => IsValid}), + from_to => FromTo, stream_id => StreamID, is_valid => IsValid}), case StateData#state.verify of false -> %% TODO: Should'nt we close the connection here ? @@ -339,9 +339,9 @@ wait_for_validation({xmlstreamelement, El}, StateData = #state{from_to = FromTo} ejabberd_s2s_in:send_validity_from_s2s_out(Pid, IsValid, FromTo), next_state(wait_for_validation, StateData) end; - {step_4, FromTo, Id, IsValid} -> + {step_4, FromTo, StreamID, IsValid} -> ?LOG_DEBUG(#{what => s2s_receive_result, - from_to => FromTo, message_id => Id, is_valid => IsValid}), + from_to => FromTo, stream_id => StreamID, is_valid => IsValid}), #state{tls_enabled = Enabled, tls_required = Required} = StateData, case IsValid of true when (Enabled==true) or (Required==false) -> @@ -538,9 +538,9 @@ stream_established({xmlstreamelement, El}, StateData = #state{from_to = FromTo}) ?LOG_DEBUG(#{what => s2s_out_stream_established, exml_packet => El, myname => StateData#state.myname, server => StateData#state.server}), case mongoose_s2s_dialback:parse_validity(El) of - {step_3, FromTo, VId, IsValid} -> + {step_3, FromTo, StreamID, IsValid} -> ?LOG_DEBUG(#{what => s2s_recv_verify, - from_to => FromTo, message_id => VId, is_valid => IsValid, + from_to => FromTo, stream_id => StreamID, is_valid => IsValid, myname 
=> StateData#state.myname, server => StateData#state.server}), case StateData#state.verify of {VPid, _VKey, _SID} -> @@ -548,7 +548,7 @@ stream_established({xmlstreamelement, El}, StateData = #state{from_to = FromTo}) _ -> ok end; - {step_4, _FromTo, _Id, _IsValid} -> + {step_4, _FromTo, _StreamID, _IsValid} -> ok; false -> ok diff --git a/src/s2s/mongoose_s2s_dialback.erl b/src/s2s/mongoose_s2s_dialback.erl index 6dc79ba91f0..1844f0fea71 100644 --- a/src/s2s/mongoose_s2s_dialback.erl +++ b/src/s2s/mongoose_s2s_dialback.erl @@ -44,7 +44,7 @@ -export([parse_key/1, parse_validity/1]). --export([make_key/2]). +-export([make_key/3]). -include("mongoose.hrl"). -include("jlib.hrl"). @@ -60,20 +60,20 @@ step_1({LocalServer, RemoteServer}, Key) -> %% Receiving server sends verification request to authoritative server (step 2) -spec step_2(ejabberd_s2s:fromto(), ejabberd_s2s:s2s_dialback_key(), ejabberd_s2s:stream_id()) -> exml:element(). -step_2({LocalServer, RemoteServer}, Key, Id) -> +step_2({LocalServer, RemoteServer}, Key, StreamID) -> #xmlel{name = <<"db:verify">>, attrs = [{<<"from">>, LocalServer}, {<<"to">>, RemoteServer}, - {<<"id">>, Id}], + {<<"id">>, StreamID}], children = [#xmlcdata{content = Key}]}. %% Receiving server is informed by authoritative server that key is valid or invalid (step 3) -spec step_3(ejabberd_s2s:fromto(), ejabberd_s2s:stream_id(), boolean()) -> exml:element(). -step_3({LocalServer, RemoteServer}, Id, IsValid) -> +step_3({LocalServer, RemoteServer}, StreamID, IsValid) -> #xmlel{name = <<"db:verify">>, attrs = [{<<"from">>, LocalServer}, {<<"to">>, RemoteServer}, - {<<"id">>, Id}, + {<<"id">>, StreamID}, {<<"type">>, is_valid_to_type(IsValid)}]}. %% Receiving server sends valid or invalid verification result to initiating server (step 4) @@ -90,7 +90,7 @@ is_valid_to_type(false) -> <<"invalid">>. 
-spec parse_key(exml:element()) -> false | {Step :: step_1 | step_2, FromTo :: ejabberd_s2s:fromto(), - Id :: ejabberd_s2s:stream_id(), + StreamID :: ejabberd_s2s:stream_id(), Key :: ejabberd_s2s:s2s_dialback_key()}. parse_key(El = #xmlel{name = <<"db:result">>}) -> %% Initiating Server Sends Dialback Key (Step 1) @@ -103,16 +103,16 @@ parse_key(_) -> parse_key(Step, El) -> FromTo = parse_from_to(El), - Id = exml_query:attr(El, <<"id">>, <<>>), + StreamID = exml_query:attr(El, <<"id">>, <<>>), Key = exml_query:cdata(El), - {Step, FromTo, Id, Key}. + {Step, FromTo, StreamID, Key}. %% Parse dialback verification result. %% Verification result is stored in the `type' attribute and could be `valid' or `invalid'. -spec parse_validity(exml:element()) -> false | {Step :: step_3 | step_4, FromTo :: ejabberd_s2s:fromto(), - Id :: ejabberd_s2s:stream_id(), + StreamID :: ejabberd_s2s:stream_id(), IsValid :: boolean()}. parse_validity(El = #xmlel{name = <<"db:verify">>}) -> %% Receiving Server is Informed by Authoritative Server that Key is Valid or Invalid (Step 3) @@ -125,9 +125,9 @@ parse_validity(_) -> parse_validity(Step, El) -> FromTo = parse_from_to(El), - Id = exml_query:attr(El, <<"id">>, <<>>), + StreamID = exml_query:attr(El, <<"id">>, <<>>), IsValid = exml_query:attr(El, <<"type">>) =:= <<"valid">>, - {Step, FromTo, Id, IsValid}. + {Step, FromTo, StreamID, IsValid}. -spec parse_from_to(exml:element()) -> ejabberd_s2s:fromto(). parse_from_to(El) -> @@ -138,7 +138,7 @@ parse_from_to(El) -> %% We use fromto() as seen by ejabberd_s2s_out and ejabberd_s2s {LLocalServer, LRemoteServer}. --spec make_key(fromto(), ejabberd_s2s:stream_id(), ejabberd_s2s:base16_secret()) -> +-spec make_key(ejabberd_s2s:fromto(), ejabberd_s2s:stream_id(), ejabberd_s2s:base16_secret()) -> ejabberd_s2s:s2s_dialback_key(). 
make_key({From, To}, StreamID, Secret) -> SecretHashed = base16:encode(crypto:hash(sha256, Secret)), From 95a3d8b11c52981947afcbd4e0999b923aed9a43 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 21:37:36 +0200 Subject: [PATCH 100/161] Handle negative needed_extra_connections_number_if_allowed --- src/ejabberd_s2s.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index 4ad566acf92..d647ead886b 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -202,8 +202,9 @@ find_connection(From, To) -> ensure_enough_connections(FromTo, OldCons) -> NeededConnections = mongoose_s2s_lib:needed_extra_connections_number_if_allowed(FromTo, OldCons), - case NeededConnections of - 0 -> + %% Could be negative, if we have too many connections + case NeededConnections =< 0 of + true -> OldCons; _ -> open_new_connections(NeededConnections, FromTo), From 6ca0ee1406ba1f153f81d89520331ab1edab5b47 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 7 Jul 2023 23:46:46 +0200 Subject: [PATCH 101/161] Use fromto_to_attrs in mongoose_s2s_dialback --- src/s2s/mongoose_s2s_dialback.erl | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/src/s2s/mongoose_s2s_dialback.erl b/src/s2s/mongoose_s2s_dialback.erl index 1844f0fea71..2e579f2523e 100644 --- a/src/s2s/mongoose_s2s_dialback.erl +++ b/src/s2s/mongoose_s2s_dialback.erl @@ -52,37 +52,36 @@ %% Initiating server sends dialback key %% https://xmpp.org/extensions/xep-0220.html#example-1 -spec step_1(ejabberd_s2s:fromto(), ejabberd_s2s:s2s_dialback_key()) -> exml:element(). -step_1({LocalServer, RemoteServer}, Key) -> +step_1(FromTo, Key) -> #xmlel{name = <<"db:result">>, - attrs = [{<<"from">>, LocalServer}, - {<<"to">>, RemoteServer}], + attrs = fromto_to_attrs(FromTo), children = [#xmlcdata{content = Key}]}. 
%% Receiving server sends verification request to authoritative server (step 2) -spec step_2(ejabberd_s2s:fromto(), ejabberd_s2s:s2s_dialback_key(), ejabberd_s2s:stream_id()) -> exml:element(). -step_2({LocalServer, RemoteServer}, Key, StreamID) -> +step_2(FromTo, Key, StreamID) -> #xmlel{name = <<"db:verify">>, - attrs = [{<<"from">>, LocalServer}, - {<<"to">>, RemoteServer}, - {<<"id">>, StreamID}], + attrs = [{<<"id">>, StreamID} | fromto_to_attrs(FromTo)], children = [#xmlcdata{content = Key}]}. %% Receiving server is informed by authoritative server that key is valid or invalid (step 3) -spec step_3(ejabberd_s2s:fromto(), ejabberd_s2s:stream_id(), boolean()) -> exml:element(). -step_3({LocalServer, RemoteServer}, StreamID, IsValid) -> +step_3(FromTo, StreamID, IsValid) -> #xmlel{name = <<"db:verify">>, - attrs = [{<<"from">>, LocalServer}, - {<<"to">>, RemoteServer}, - {<<"id">>, StreamID}, - {<<"type">>, is_valid_to_type(IsValid)}]}. + attrs = [{<<"id">>, StreamID}, + {<<"type">>, is_valid_to_type(IsValid)} + | fromto_to_attrs(FromTo)]}. %% Receiving server sends valid or invalid verification result to initiating server (step 4) -spec step_4(ejabberd_s2s:fromto(), boolean()) -> exml:element(). -step_4({LocalServer, RemoteServer}, IsValid) -> +step_4(FromTo, IsValid) -> #xmlel{name = <<"db:result">>, - attrs = [{<<"from">>, LocalServer}, - {<<"to">>, RemoteServer}, - {<<"type">>, is_valid_to_type(IsValid)}]}. + attrs = [{<<"type">>, is_valid_to_type(IsValid)} + | fromto_to_attrs(FromTo)]}. + +-spec fromto_to_attrs(ejabberd_s2s:fromto()) -> [{binary(), binary()}]. +fromto_to_attrs({LocalServer, RemoteServer}) -> + [{<<"from">>, LocalServer}, {<<"to">>, RemoteServer}]. is_valid_to_type(true) -> <<"valid">>; is_valid_to_type(false) -> <<"invalid">>. 
From 6f3271ed7bfe32904365d54a641c219188088bac Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Sat, 8 Jul 2023 00:12:18 +0200 Subject: [PATCH 102/161] Add missing metrics/comments --- src/ejabberd_s2s.erl | 15 ++++++++------- src/ejabberd_s2s_in.erl | 1 + 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index d647ead886b..f08607f8cbf 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -203,13 +203,13 @@ ensure_enough_connections(FromTo, OldCons) -> NeededConnections = mongoose_s2s_lib:needed_extra_connections_number_if_allowed(FromTo, OldCons), %% Could be negative, if we have too many connections - case NeededConnections =< 0 of + case NeededConnections > 0 of true -> - OldCons; - _ -> open_new_connections(NeededConnections, FromTo), %% Query for s2s pids one more time - get_s2s_out_pids(FromTo) + get_s2s_out_pids(FromTo); + false -> + OldCons end. -spec open_new_connections(N :: pos_integer(), FromTo :: fromto()) -> ok. @@ -223,11 +223,11 @@ open_new_connection(FromTo) -> {ok, Pid} = ejabberd_s2s_out:start(FromTo, new), %% Try to write the Pid into Mnesia/CETS IsRegistered = call_try_register(Pid, FromTo), - %% If successful, create an actual network connection - %% If not successful, remove the process maybe_start_connection(Pid, FromTo, IsRegistered), ok. +%% If registration is successful, create an actual network connection. +%% If not successful, remove the process. -spec maybe_start_connection(Pid :: pid(), FromTo :: fromto(), IsRegistered :: boolean()) -> ok. maybe_start_connection(Pid, FromTo, true) -> ?LOG_INFO(#{what => s2s_new_connection, @@ -243,6 +243,7 @@ set_shared_secret() -> ok. %% Updates the secret across the cluster if needed +-spec set_shared_secret(mongooseim:host_type()) -> ok. 
set_shared_secret(HostType) -> case mongoose_s2s_lib:check_shared_secret(HostType, get_shared_secret(HostType)) of {update, NewSecret} -> @@ -264,7 +265,7 @@ get_s2s_out_pids(FromTo) -> mongoose_s2s_backend:get_s2s_out_pids(FromTo). %% Returns true if the connection is registered --spec call_try_register(Pid :: pid(), FromTo :: fromto()) -> boolean(). +-spec call_try_register(Pid :: pid(), FromTo :: fromto()) -> IsRegistered :: boolean(). call_try_register(Pid, FromTo) -> mongoose_s2s_backend:try_register(Pid, FromTo). diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index 513f1fa9ac4..6c7dc8dcee1 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -672,6 +672,7 @@ get_tls_xmlel(#state{tls_enabled = false, tls_required = true}) -> attrs = [{<<"xmlns">>, ?NS_TLS}], children = [#xmlel{name = <<"required">>}]}]. +-spec is_local_host_known(ejabberd_s2s:fromto()) -> boolean(). is_local_host_known({LLocalServer, _}) -> mongoose_router:is_registered_route(LLocalServer) orelse mongoose_component:has_component(LLocalServer) From e77160f881fa77595743e88f1446b862d10821f8 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 10 Jul 2023 11:25:29 +0200 Subject: [PATCH 103/161] Improve logging and specs in ejabberd_s2s_in/out Log SSL errors from mongooose_transport --- src/ejabberd_s2s_in.erl | 46 ++++++++++++++++++++++---------------- src/ejabberd_s2s_out.erl | 2 +- src/mongoose_transport.erl | 6 ++--- 3 files changed, 31 insertions(+), 23 deletions(-) diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index 6c7dc8dcee1..82b776889e0 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -54,18 +54,18 @@ -include("mongoose.hrl"). -include("jlib.hrl"). 
--record(state, {socket, - streamid :: binary(), - shaper, +-record(state, {socket :: mongoose_transport:socket_data(), + streamid :: ejabberd_s2s:stream_id(), + shaper :: shaper:shaper(), tls = false :: boolean(), tls_enabled = false :: boolean(), tls_required = false :: boolean(), tls_cert_verify = false :: boolean(), tls_options :: mongoose_tls:options(), - server :: jid:server() | undefined, + server :: jid:lserver() | undefined, host_type :: mongooseim:host_type() | undefined, authenticated = false :: boolean(), - auth_domain :: binary() | undefined, + auth_domain :: jid:lserver() | undefined, connections = #{} :: map(), timer :: reference() }). @@ -150,14 +150,15 @@ init([Socket, #{shaper := Shaper, tls := TLSOpts}]) -> %%---------------------------------------------------------------------- -spec wait_for_stream(ejabberd:xml_stream_item(), state()) -> fsm_return(). -wait_for_stream({xmlstreamstart, _Name, Attrs}, StateData) -> +wait_for_stream({xmlstreamstart, _Name, Attrs} = Event, StateData) -> case maps:from_list(Attrs) of AttrMap = #{<<"xmlns">> := <<"jabber:server">>, <<"to">> := Server} -> case StateData#state.server of undefined -> case mongoose_domain_api:get_host_type(Server) of {error, not_found} -> - stream_start_error(StateData, mongoose_xmpp_errors:host_unknown()); + Info = #{location => ?LOCATION, last_event => Event}, + stream_start_error(StateData, Info, mongoose_xmpp_errors:host_unknown()); {ok, HostType} -> UseTLS = mongoose_config:get_opt([{s2s, HostType}, use_starttls]), {StartTLS, TLSRequired, TLSCertVerify} = get_tls_params(UseTLS), @@ -171,17 +172,22 @@ wait_for_stream({xmlstreamstart, _Name, Attrs}, StateData) -> start_stream(AttrMap, StateData); _Other -> Msg = <<"The 'to' attribute differs from the originally provided one">>, - stream_start_error(StateData, mongoose_xmpp_errors:host_unknown(?MYLANG, Msg)) + Info = #{location => ?LOCATION, last_event => Event, + expected_server => StateData#state.server, provided_server => Server}, + 
stream_start_error(StateData, Info, mongoose_xmpp_errors:host_unknown(?MYLANG, Msg)) end; #{<<"xmlns">> := <<"jabber:server">>} -> Msg = <<"The 'to' attribute is missing">>, - stream_start_error(StateData, mongoose_xmpp_errors:improper_addressing(?MYLANG, Msg)); + Info = #{location => ?LOCATION, last_event => Event}, + stream_start_error(StateData, Info, mongoose_xmpp_errors:improper_addressing(?MYLANG, Msg)); _ -> - stream_start_error(StateData, mongoose_xmpp_errors:invalid_namespace()) + Info = #{location => ?LOCATION, last_event => Event}, + stream_start_error(StateData, Info, mongoose_xmpp_errors:invalid_namespace()) end; -wait_for_stream({xmlstreamerror, _}, StateData) -> - ?LOG_WARNING(#{what => s2s_in_wait_for_stream_error}), - stream_start_error(StateData, mongoose_xmpp_errors:xml_not_well_formed()); +wait_for_stream({xmlstreamerror, _} = Event, StateData) -> + Info = #{location => ?LOCATION, last_event => Event, + reason => s2s_in_wait_for_stream_error}, + stream_start_error(StateData, Info, mongoose_xmpp_errors:xml_not_well_formed()); wait_for_stream(timeout, StateData) -> ?LOG_WARNING(#{what => s2s_in_wait_for_stream_timeout}), {stop, normal, StateData}; @@ -189,7 +195,7 @@ wait_for_stream(closed, StateData) -> ?LOG_WARNING(#{what => s2s_in_wait_for_stream_closed}), {stop, normal, StateData}. 
-start_stream(#{<<"version">> := <<"1.0">>, <<"from">> := RemoteServer}, +start_stream(#{<<"version">> := <<"1.0">>, <<"from">> := RemoteServer} = Event, StateData = #state{tls = true, authenticated = false, server = Server, host_type = HostType}) -> SASL = case StateData#state.tls_enabled of @@ -208,7 +214,8 @@ start_stream(#{<<"version">> := <<"1.0">>, <<"from">> := RemoteServer}, remote_server => RemoteServer, reason => cert_error, cert_error => CertError}), - stream_start_error(StateData, + Info = #{location => ?LOCATION, last_event => Event, reason => error_cert_verif}, + stream_start_error(StateData, Info, mongoose_xmpp_errors:policy_violation(?MYLANG, CertError)); %% We were stopping ejabberd_s2s_out connection in the older version of the code %% from this location. But stopping outgoing connections just because a non-verified @@ -230,14 +237,15 @@ start_stream(#{<<"version">> := <<"1.0">>}, start_stream(#{<<"xmlns:db">> := <<"jabber:server:dialback">>}, StateData) -> send_text(StateData, ?STREAM_HEADER(<<>>)), {next_state, stream_established, StateData}; -start_stream(_, StateData) -> - stream_start_error(StateData, mongoose_xmpp_errors:invalid_xml()). +start_stream(Event, StateData) -> + Info = #{location => ?LOCATION, last_event => Event}, + stream_start_error(StateData, Info, mongoose_xmpp_errors:invalid_xml()). -stream_start_error(StateData, Error) -> +stream_start_error(StateData, Info, Error) -> send_text(StateData, ?STREAM_HEADER(<<>>)), send_element(StateData, Error), send_text(StateData, ?STREAM_TRAILER), - ?LOG_WARNING(#{what => s2s_in_stream_start_error}), + ?LOG_WARNING(Info#{what => s2s_in_stream_start_error, element => Error}), {stop, normal, StateData}. 
-spec wait_for_feature_request(ejabberd:xml_stream_item(), state() diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index e86ea7f88fe..f58d3e27f21 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -703,7 +703,7 @@ terminate(Reason, StateName, StateData) -> ok; false -> ?LOG_WARNING(#{what => s2s_terminate_non_empty, - state_name => StateName, + state_name => StateName, reason => Reason, queue => lists:sublist(queue:to_list(StateData#state.queue), 10), authenticated => StateData#state.authenticated}) end, diff --git a/src/mongoose_transport.erl b/src/mongoose_transport.erl index 87f358bf068..b1dc8d04b5d 100644 --- a/src/mongoose_transport.erl +++ b/src/mongoose_transport.erl @@ -18,7 +18,6 @@ %% Types %%---------------------------------------------------------------------- --type t() :: any(). -type send_xml_input() :: {xmlstreamelement, exml:element()} | jlib:xmlstreamstart() | jlib:xmlstreamend(). @@ -34,7 +33,7 @@ channel => connection_type(), atom() => any()}. --export_type([t/0, send_xml_input/0, peer/0, peername_return/0, peercert_return/0]). +-export_type([socket_data/0, send_xml_input/0, peer/0, peername_return/0, peercert_return/0]). -type socket_module() :: gen_tcp | mongoose_tls. -type socket() :: gen_tcp:socket() | mongoose_tls:socket(). 
@@ -286,7 +285,8 @@ handle_info({Tag, _TCPSocket, Data}, {ok, TLSData} -> NewState = process_data(TLSData, State), {noreply, NewState, hibernate_or_timeout(NewState)}; - {error, _Reason} -> + {error, Reason} -> + ?LOG_WARNING(#{what => transport_tls_recv_error, socket => Socket, reason => Reason}), {stop, normal, State} end; handle_info({Tag, _Socket}, State) when Tag == tcp_closed; Tag == ssl_closed -> From a0ac80292a66da08ab19c1d25366f181eed9eced Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 10 Jul 2023 12:50:57 +0200 Subject: [PATCH 104/161] Use ejabberd_sup:start_child/2 to report errors --- src/ejabberd_sup.erl | 11 +++++++++-- src/mongoose_listener_sup.erl | 8 ++++---- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/src/ejabberd_sup.erl b/src/ejabberd_sup.erl index 09f634bf1a1..e2796588ab7 100644 --- a/src/ejabberd_sup.erl +++ b/src/ejabberd_sup.erl @@ -29,7 +29,7 @@ -behaviour(supervisor). -export([start_link/0, init/1]). --export([start_child/1, stop_child/1]). +-export([start_child/1, start_child/2, stop_child/1]). -include("mongoose_logger.hrl"). @@ -178,13 +178,20 @@ init([]) -> MucIQ, ShaperSup]}}. + start_child(ChildSpec) -> - case supervisor:start_child(ejabberd_sup, ChildSpec) of + start_child(ejabberd_sup, ChildSpec). + +%% This function handles error results from supervisor:start_child +%% It does some logging +start_child(SupName, ChildSpec) -> + case supervisor:start_child(SupName, ChildSpec) of {ok, Pid} -> {ok, Pid}; Other -> Stacktrace = element(2, erlang:process_info(self(), current_stacktrace)), ?LOG_ERROR(#{what => start_child_failed, spec => ChildSpec, + supervisor_name => SupName, reason => Other, stacktrace => Stacktrace}), erlang:error({start_child_failed, Other, ChildSpec}) end. 
diff --git a/src/mongoose_listener_sup.erl b/src/mongoose_listener_sup.erl index 8628b5124b3..b0b6a9f89a6 100644 --- a/src/mongoose_listener_sup.erl +++ b/src/mongoose_listener_sup.erl @@ -8,6 +8,8 @@ -ignore_xref([start_link/0, init/1]). +-include("mongoose_logger.hrl"). + %% API -spec start_link() -> {ok, pid()}. @@ -16,8 +18,8 @@ start_link() -> -spec start_child(supervisor:child_spec()) -> ok. start_child(ChildSpec) -> - Res = supervisor:start_child(?MODULE, ChildSpec), - check_start_child_result(Res, ChildSpec), + %% Use ejabberd_sup function for extra logging on errors + ejabberd_sup:start_child(?MODULE, ChildSpec), ok. %% Supervisor callbacks @@ -27,5 +29,3 @@ init([]) -> {ok, {#{strategy => one_for_one, intensity => 10, period => 1}, []}}. - -check_start_child_result({ok, _Pid}, _ChildSpec) -> ok. From 8c908fe0d4e1054ccab90a2c79fa698fe7cb66f0 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 10 Jul 2023 17:41:54 +0200 Subject: [PATCH 105/161] Add s2s_dialback_SUITE To test dialback and secrets management Add dialback_with_wrong_key testcase It improves coverage a bit --- big_tests/tests/muc_SUITE.erl | 2 +- big_tests/tests/s2s_SUITE.erl | 62 ++++++++++++++++++++++++++++++---- big_tests/tests/s2s_helper.erl | 6 +--- 3 files changed, 57 insertions(+), 13 deletions(-) diff --git a/big_tests/tests/muc_SUITE.erl b/big_tests/tests/muc_SUITE.erl index 1439f4afb20..5890e4288fd 100644 --- a/big_tests/tests/muc_SUITE.erl +++ b/big_tests/tests/muc_SUITE.erl @@ -318,7 +318,7 @@ rsm_cases() -> rsm_cases_with_offline() -> [pagination_all_with_offline]. suite() -> - s2s_helper:suite(escalus:suite()). + distributed_helper:require_rpc_nodes([mim, fed]) ++ escalus:suite(). 
%%-------------------------------------------------------------------- %% Init & teardown diff --git a/big_tests/tests/s2s_SUITE.erl b/big_tests/tests/s2s_SUITE.erl index 04239f3b026..2ba0c2d2cb0 100644 --- a/big_tests/tests/s2s_SUITE.erl +++ b/big_tests/tests/s2s_SUITE.erl @@ -10,9 +10,11 @@ -include_lib("escalus/include/escalus.hrl"). -include_lib("exml/include/exml.hrl"). -include_lib("exml/include/exml_stream.hrl"). +-include_lib("eunit/include/eunit.hrl"). %% Module aliases -define(dh, distributed_helper). +-import(distributed_helper, [mim/0, rpc_spec/1, rpc/4]). %%%=================================================================== %%% Suite configuration @@ -34,8 +36,9 @@ all() -> {group, node1_tls_optional_node2_tls_false}, {group, node1_tls_false_node2_tls_required}, - {group, node1_tls_required_node2_tls_false} + {group, node1_tls_required_node2_tls_false}, + {group, dialback} ]. groups() -> @@ -57,16 +60,15 @@ groups() -> {node1_tls_optional_node2_tls_false, [], essentials()}, {node1_tls_false_node2_tls_required, [], negative()}, - {node1_tls_required_node2_tls_false, [], negative()}]. + {node1_tls_required_node2_tls_false, [], negative()}, + {dialback, [], [dialback_key_is_different_on_different_nodes]}]. essentials() -> [simple_message]. -metrics() -> - [s2s_metrics_testing]. - all_tests() -> - [connections_info, nonexistent_user, unknown_domain, malformed_jid | essentials()]. + [connections_info, nonexistent_user, unknown_domain, malformed_jid, + dialback_with_wrong_key | essentials()]. negative() -> [timeout_waiting_for_message]. @@ -84,7 +86,7 @@ connection_cases() -> auth_with_valid_cert_fails_for_other_mechanism_than_external]. suite() -> - s2s_helper:suite(escalus:suite()). + distributed_helper:require_rpc_nodes([mim, mim2, fed]) ++ escalus:suite(). users() -> [alice2, alice, bob]. @@ -103,6 +105,9 @@ end_per_suite(Config) -> escalus:delete_users(Config, escalus:get_users(users())), escalus:end_per_suite(Config). 
+init_per_group(dialback, Config) -> + %% Tell mnesia that mim and mim2 nodes are clustered + distributed_helper:add_node_to_cluster(distributed_helper:mim2(), Config); init_per_group(GroupName, Config) -> s2s_helper:configure_s2s(GroupName, Config). @@ -215,6 +220,23 @@ malformed_jid(Config) -> end). +dialback_with_wrong_key(_Config) -> + HostType = domain_helper:host_type(mim), + MimDomain = domain_helper:domain(mim), + FedDomain = domain_helper:domain(fed), + FromTo = {MimDomain, FedDomain}, + Key = <<"123456">>, %% wrong key + StreamId = <<"sdfdsferrr">>, + StartType = {verify, self(), Key, StreamId}, + {ok, _} = rpc(rpc_spec(mim), ejabberd_s2s_out, start, [FromTo, StartType]), + receive + %% Remote server (fed1) rejected out request + {'$gen_event', {validity_from_s2s_out, false, FromTo}} -> + ok + after 5000 -> + ct:fail(timeout) + end. + nonascii_addr(Config) -> escalus:fresh_story(Config, [{alice, 1}, {bob2, 1}], fun(Alice, Bob) -> @@ -422,3 +444,29 @@ get_main_key_and_cert_files(Config) -> get_main_file_path(Config, File) -> filename:join([path_helper:repo_dir(Config), "tools", "ssl", "mongooseim", File]). + +dialback_key_is_different_on_different_nodes(_Config) -> + configure_secret_and_restart_s2s(mim), + configure_secret_and_restart_s2s(mim2), + Key1 = get_shared_secret(mim), + Key2 = get_shared_secret(mim2), + ?assertEqual(Key1, Key2), + %% Node 2 is restarted later, so both nodes should have the key. + ?assertEqual(Key2, {ok, <<"9e438f25e81cf347100b">>}). + +get_shared_secret(NodeKey) -> + HostType = domain_helper:host_type(mim), + rpc(rpc_spec(NodeKey), mongoose_s2s_backend, get_shared_secret, [HostType]). + +set_opt(Spec, Opt, Value) -> + rpc(Spec, mongoose_config, set_opt, [Opt, Value]). 
+ +configure_secret_and_restart_s2s(NodeKey) -> + HostType = domain_helper:host_type(mim), + Spec = rpc_spec(NodeKey), + set_opt(Spec, [{s2s, HostType}, shared], shared_secret(NodeKey)), + ok = rpc(Spec, supervisor, terminate_child, [ejabberd_sup, ejabberd_s2s]), + {ok, _} = rpc(Spec, supervisor, restart_child, [ejabberd_sup, ejabberd_s2s]). + +shared_secret(mim) -> <<"f623e54a0741269be7dd">>; %% Some random key +shared_secret(mim2) -> <<"9e438f25e81cf347100b">>. diff --git a/big_tests/tests/s2s_helper.erl b/big_tests/tests/s2s_helper.erl index 539a83f81b7..69b7c338657 100644 --- a/big_tests/tests/s2s_helper.erl +++ b/big_tests/tests/s2s_helper.erl @@ -1,15 +1,11 @@ -module(s2s_helper). --export([suite/1]). -export([init_s2s/1]). -export([end_s2s/1]). -export([configure_s2s/2]). --import(distributed_helper, [fed/0, mim/0, rpc_spec/1, require_rpc_nodes/1, rpc/4]). +-import(distributed_helper, [rpc_spec/1, rpc/4]). -import(domain_helper, [host_type/1]). -suite(Config) -> - require_rpc_nodes(node_keys()) ++ Config. - init_s2s(Config) -> [{{s2s, NodeKey}, get_s2s_opts(NodeKey)} || NodeKey <- node_keys()] ++ [{escalus_user_db, xmpp} | Config]. 
From 02dddfebaeaa0d40075b691b1935c5e1e645e671 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 11 Jul 2023 09:32:09 +0200 Subject: [PATCH 106/161] Use main branch for CETS library --- rebar.config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rebar.config b/rebar.config index ac88130d5fe..080e81f6c49 100644 --- a/rebar.config +++ b/rebar.config @@ -80,7 +80,7 @@ {cache_tab, "1.0.30"}, {segmented_cache, "0.3.0"}, {worker_pool, "6.0.1"}, - {cets, {git, "https://github.com/esl/cets.git", {branch, "mu-conflict-handler"}}}, + {cets, {git, "https://github.com/esl/cets.git", {branch, "main"}}}, %%% HTTP tools {graphql, {git, "https://github.com/esl/graphql-erlang.git", {branch, "master"}}}, From adc892c69dfb3dcb4febc2cd211425f8dc51a062 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 11 Jul 2023 09:44:56 +0200 Subject: [PATCH 107/161] Document s2s_backend option --- doc/configuration/general.md | 11 ++++++++++- doc/configuration/release-options.md | 7 +++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/doc/configuration/general.md b/doc/configuration/general.md index ebddbdfc65c..a2ab620bbf7 100644 --- a/doc/configuration/general.md +++ b/doc/configuration/general.md @@ -142,7 +142,7 @@ According to RFC 6210, even when a client sends invalid data after opening a con These options can be used to configure the way MongooseIM manages user sessions. ### `general.sm_backend` -* **Syntax:** string, `"mnesia"` or `"redis"` +* **Syntax:** string, `"mnesia"`, `"cets"` or `"redis"` * **Default:** `"mnesia"` * **Example:** `sm_backend = "redis"` @@ -157,6 +157,15 @@ See the section about [redis connection setup](./outgoing-connections.md#redis-s When a user's session is replaced (due to a full JID conflict) by a new one, this parameter specifies the time MongooseIM waits for the old sessions to close. The default value is sufficient in most cases. 
If you observe `replaced_wait_timeout` warning in logs, then most probably the old sessions are frozen for some reason and it should be investigated. +## XMPP federation (S2S) + +### `general.s2s_backend` +* **Syntax:** string, `"mnesia"`, `"cets"` +* **Default:** `"mnesia"` +* **Example:** `s2s_backend = "cets"` + +Backend for replicating the list of outgoing Server to Server (S2S) connections across the nodes of the local MongooseIM cluster. + ## Message routing The following options influence the way MongooseIM routes incoming messages to their recipients. diff --git a/doc/configuration/release-options.md b/doc/configuration/release-options.md index 3b99b1097f6..22937368cda 100644 --- a/doc/configuration/release-options.md +++ b/doc/configuration/release-options.md @@ -197,6 +197,13 @@ These options are inserted into the `rel/files/mongooseim.toml` template. * **Syntax:** string * **Example:** `{sm_backend, "\"redis\""}.` +### s2s_backend + +* **Type:** parameter +* **Option:** [`general.s2s_backend`](general.md#generals2s_backend) +* **Syntax:** string +* **Example:** `{s2s_backend, "\"mnesia\""}.` + ### tls_config * **Type:** block From 8fcfceeb752fb0399415f642d250956d701bccbc Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 20 Jul 2023 14:32:59 +0200 Subject: [PATCH 108/161] Imrove get_connection function --- big_tests/tests/s2s_SUITE.erl | 10 ++- src/ejabberd_s2s_in.erl | 91 +++++++++++++------------- src/ejabberd_s2s_out.erl | 117 ++++++++++++++++++---------------- src/s2s/mongoose_s2s_info.erl | 64 ++++++++----------- 4 files changed, 139 insertions(+), 143 deletions(-) diff --git a/big_tests/tests/s2s_SUITE.erl b/big_tests/tests/s2s_SUITE.erl index 2ba0c2d2cb0..f01ea060ddf 100644 --- a/big_tests/tests/s2s_SUITE.erl +++ b/big_tests/tests/s2s_SUITE.erl @@ -164,14 +164,12 @@ connections_info(Config) -> [_ | _] = get_s2s_connections(?dh:mim(), FedDomain, out), ok. 
-get_s2s_connections(RPCSpec, Domain, Type)-> - AllS2SConnections = ?dh:rpc(RPCSpec, mongoose_s2s_info, get_info_s2s_connections, [Type]), - % ct:pal("Node = ~p, ConnectionType = ~p~nAllS2SConnections(~p): ~p", - % [maps:get(node, RPCSpec), Type, length(AllS2SConnections), AllS2SConnections]), +get_s2s_connections(RPCSpec, Domain, Type) -> + AllS2SConnections = ?dh:rpc(RPCSpec, mongoose_s2s_info, get_connections, [Type]), DomainS2SConnections = [Connection || Connection <- AllS2SConnections, - Type =/= in orelse [Domain] =:= proplists:get_value(domains, Connection), - Type =/= out orelse Domain =:= proplists:get_value(server, Connection)], + Type =/= in orelse [Domain] =:= maps:get(domains, Connection), + Type =/= out orelse Domain =:= maps:get(server, Connection)], ct:pal("Node = ~p, ConnectionType = ~p, Domain = ~s~nDomainS2SConnections(~p): ~p", [maps:get(node, RPCSpec), Type, Domain, length(DomainS2SConnections), DomainS2SConnections]), diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index 82b776889e0..ba03fdcdc51 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -48,6 +48,8 @@ handle_info/3, terminate/3]). +-export_type([connection_info/0]). + -ignore_xref([match_domain/2, start/2, start_link/2, stream_established/2, wait_for_feature_request/2, wait_for_stream/2]). @@ -71,6 +73,20 @@ }). -type state() :: #state{}. +-type connection_info() :: + #{pid => pid(), + direction => in, + statename => statename(), + addr => inet:ip_address(), + port => inet:port_number(), + streamid => ejabberd_s2s:stream_id(), + tls => boolean(), + tls_enabled => boolean(), + tls_options => mongoose_tls:options(), + authenticated => boolean(), + shaper => shaper:shaper(), + domains => [jid:lserver()]}. + -type statename() :: 'stream_established' | 'wait_for_feature_request'. 
%% FSM handler return value -type fsm_return() :: {'stop', Reason :: 'normal', state()} @@ -458,53 +474,12 @@ route_stanza(Acc) -> handle_event(_Event, StateName, StateData) -> {next_state, StateName, StateData}. -%%---------------------------------------------------------------------- -%% Func: handle_sync_event/4 -%% Returns: The associated StateData for this connection -%% {reply, Reply, NextStateName, NextStateData} -%% Reply = {state_infos, [{InfoName::atom(), InfoValue::any()] -%%---------------------------------------------------------------------- --spec handle_sync_event(any(), any(), statename(), state() - ) -> {'reply', 'ok' | {'state_infos', [any(), ...]}, atom(), state()}. -handle_sync_event(get_state_infos, _From, StateName, StateData) -> - {ok, {Addr, Port}} = mongoose_transport:peername(StateData#state.socket), - Domains = case StateData#state.authenticated of - true -> - [StateData#state.auth_domain]; - false -> - Connections = StateData#state.connections, - [LRemoteServer || {{_, LRemoteServer}, established} <- - maps:to_list(Connections)] - end, - Infos = [ - {direction, in}, - {statename, StateName}, - {addr, Addr}, - {port, Port}, - {streamid, StateData#state.streamid}, - {tls, StateData#state.tls}, - {tls_enabled, StateData#state.tls_enabled}, - {tls_options, StateData#state.tls_options}, - {authenticated, StateData#state.authenticated}, - {shaper, StateData#state.shaper}, - {domains, Domains} - ], - Reply = {state_infos, Infos}, - {reply, Reply, StateName, StateData}; - -%%---------------------------------------------------------------------- -%% Func: handle_sync_event/4 -%% Returns: {next_state, NextStateName, NextStateData} | -%% {next_state, NextStateName, NextStateData, Timeout} | -%% {reply, Reply, NextStateName, NextStateData} | -%% {reply, Reply, NextStateName, NextStateData, Timeout} | -%% {stop, Reason, NewStateData} | -%% {stop, Reason, Reply, NewStateData} -%%---------------------------------------------------------------------- 
+-spec handle_sync_event(any(), any(), statename(), state()) -> + {reply, ok | connection_info(), statename(), state()}. +handle_sync_event(get_state_info, _From, StateName, StateData) -> + {reply, handle_get_state_info(StateName, StateData), StateName, StateData}; handle_sync_event(_Event, _From, StateName, StateData) -> - Reply = ok, - {reply, Reply, StateName, StateData}. - + {reply, ok, StateName, StateData}. code_change(_OldVsn, StateName, StateData, _Extra) -> {ok, StateName, StateData}. @@ -693,3 +668,27 @@ is_known_domain(Domain) -> _ -> false end. + +-spec handle_get_state_info(statename(), state()) -> connection_info(). +handle_get_state_info(StateName, StateData) -> + {ok, {Addr, Port}} = mongoose_transport:peername(StateData#state.socket), + Domains = case StateData#state.authenticated of + true -> + [StateData#state.auth_domain]; + false -> + Connections = StateData#state.connections, + [LRemoteServer || {{_, LRemoteServer}, established} <- + maps:to_list(Connections)] + end, + #{pid => self(), + direction => in, + statename => StateName, + addr => Addr, + port => Port, + streamid => StateData#state.streamid, + tls => StateData#state.tls, + tls_enabled => StateData#state.tls_enabled, + tls_options => StateData#state.tls_options, + authenticated => StateData#state.authenticated, + shaper => StateData#state.shaper, + domains => Domains}. diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index f58d3e27f21..12ba81804cc 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -55,19 +55,23 @@ print_state/1, code_change/4]). +-export_type([connection_info/0]). + -ignore_xref([open_socket/2, print_state/1, reopen_socket/2, start_link/2, stream_established/2, wait_before_retry/2, wait_for_auth_result/2, wait_for_features/2, wait_for_starttls_proceed/2, wait_for_stream/2, wait_for_stream/2, wait_for_validation/2]). 
+-type verify_requester() :: false | {S2SIn :: pid(), Key :: ejabberd_s2s:s2s_dialback_key(), SID :: ejabberd_s2s:stream_id()}. + -include("mongoose.hrl"). -include("jlib.hrl"). -record(state, {socket, - streamid, - remote_streamid = <<>>, - use_v10, + streamid :: ejabberd_s2s:stream_id() | undefined, + remote_streamid = <<>> :: ejabberd_s2s:stream_id(), + use_v10 :: boolean(), tls = false :: boolean(), tls_required = false :: boolean(), tls_enabled = false :: boolean(), @@ -76,16 +80,37 @@ dialback_enabled = true :: boolean(), try_auth = true :: boolean(), from_to :: ejabberd_s2s:fromto(), - myname, server, queue, + myname :: jid:lserver(), + server :: jid:lserver(), + queue :: element_queue(), host_type :: mongooseim:host_type(), - delay_to_retry = undefined_delay, - %% is_registered + delay_to_retry = undefined :: undefined | non_neg_integer(), is_registered = false :: boolean(), - verify = false :: false | {pid(), Key :: binary(), SID :: binary()}, + verify = false :: verify_requester(), timer :: reference() }). -type state() :: #state{}. +-type connection_info() :: + #{pid => pid(), + direction => out, + statename => statename(), + addr => unknown | inet:ip_address(), + port => unknown | inet:port_number(), + streamid => ejabberd_s2s:stream_id() | undefined, + use_v10 => boolean(), + tls => boolean(), + tls_required => boolean(), + tls_enabled => boolean(), + tls_options => mongoose_tls:options(), + authenticated => boolean(), + dialback_enabled => boolean(), + try_auth => boolean(), + myname => jid:lserver(), + server => jid:lserver(), + delay_to_retry => undefined | non_neg_integer(), + verify => verify_requester()}. + -type element_queue() :: queue:queue(#xmlel{}). -type statename() :: open_socket | wait_for_stream @@ -588,53 +613,10 @@ stream_established(closed, StateData) -> handle_event(_Event, StateName, StateData) -> next_state(StateName, StateData). 
-%%---------------------------------------------------------------------- -%% Func: handle_sync_event/4 -%% Returns: The associated StateData for this connection -%% {reply, Reply, NextStateName, NextStateData} -%% Reply = {state_infos, [{InfoName::atom(), InfoValue::any()] -%%---------------------------------------------------------------------- -handle_sync_event(get_state_infos, _From, StateName, StateData) -> - {Addr, Port} = try mongoose_transport:peername(StateData#state.socket) of - {ok, {A, P}} -> {A, P} - catch - _:_ -> - {unknown, unknown} - end, - Infos = [ - {direction, out}, - {statename, StateName}, - {addr, Addr}, - {port, Port}, - {streamid, StateData#state.streamid}, - {use_v10, StateData#state.use_v10}, - {tls, StateData#state.tls}, - {tls_required, StateData#state.tls_required}, - {tls_enabled, StateData#state.tls_enabled}, - {tls_options, StateData#state.tls_options}, - {authenticated, StateData#state.authenticated}, - {dialback_enabled, StateData#state.dialback_enabled}, - {try_auth, StateData#state.try_auth}, - {myname, StateData#state.myname}, - {server, StateData#state.server}, - {delay_to_retry, StateData#state.delay_to_retry}, - {verify, StateData#state.verify} - ], - Reply = {state_infos, Infos}, - {reply, Reply, StateName, StateData}; - -%%---------------------------------------------------------------------- -%% Func: handle_sync_event/4 -%% Returns: {next_state, NextStateName, NextStateData} | -%% {next_state, NextStateName, NextStateData, Timeout} | -%% {reply, Reply, NextStateName, NextStateData} | -%% {reply, Reply, NextStateName, NextStateData, Timeout} | -%% {stop, Reason, NewStateData} | -%% {stop, Reason, Reply, NewStateData} -%%---------------------------------------------------------------------- +handle_sync_event(get_state_info, _From, StateName, StateData) -> + {reply, handle_get_state_info(StateName, StateData), StateName, StateData}; handle_sync_event(_Event, _From, StateName, StateData) -> - Reply = ok, - {reply, Reply, 
StateName, StateData, get_timeout_interval(StateName)}. + {reply, ok, StateName, StateData, get_timeout_interval(StateName)}. code_change(_OldVsn, StateName, StateData, _Extra) -> @@ -1003,7 +985,7 @@ wait_before_reconnect(StateData) -> bounce_messages(E), cancel_timer(StateData#state.timer), Delay = case StateData#state.delay_to_retry of - undefined_delay -> + undefined -> %% The initial delay is random between 1 and 15 seconds %% Return a random integer between 1000 and 15000 MicroSecs = erlang:system_time(microsecond), @@ -1150,3 +1132,30 @@ handle_parsed_features({_, _, _, StateData}) -> mongoose_transport:close(StateData#state.socket), {next_state, reopen_socket, StateData#state{socket = undefined, use_v10 = false}, ?FSMTIMEOUT}. + +handle_get_state_info(StateName, StateData) -> + {Addr, Port} = get_peername(StateData#state.socket), + #{pid => self(), + direction => out, + statename => StateName, + addr => Addr, + port => Port, + streamid => StateData#state.streamid, + use_v10 => StateData#state.use_v10, + tls => StateData#state.tls, + tls_required => StateData#state.tls_required, + tls_enabled => StateData#state.tls_enabled, + tls_options => StateData#state.tls_options, + authenticated => StateData#state.authenticated, + dialback_enabled => StateData#state.dialback_enabled, + try_auth => StateData#state.try_auth, + myname => StateData#state.myname, + server => StateData#state.server, + delay_to_retry => StateData#state.delay_to_retry, + verify => StateData#state.verify}. + +get_peername(undefined) -> + {unknown, unknown}; +get_peername(Socket) -> + {ok, {Addr, Port}} = mongoose_transport:peername(Socket), + {Addr, Port}. diff --git a/src/s2s/mongoose_s2s_info.erl b/src/s2s/mongoose_s2s_info.erl index 5fb5bfa377e..79a75679cb3 100644 --- a/src/s2s/mongoose_s2s_info.erl +++ b/src/s2s/mongoose_s2s_info.erl @@ -1,46 +1,36 @@ -%% Some ugly code only used in tests. -%% It was originally in ejabberd_s2s, but it was moved out to improve readability. 
+%% Get information about S2S connections on this node. -module(mongoose_s2s_info). %% ejabberd API --export([get_info_s2s_connections/1]). --ignore_xref([get_info_s2s_connections/1]). +-export([get_connections/1]). +-ignore_xref([get_connections/1]). --type connstate() :: 'restarting' | 'undefined' | pid(). --type conn() :: { any(), connstate(), 'supervisor' | 'worker', 'dynamic' | [_] }. +-include("mongoose_logger.hrl"). + +-type direction() :: in | out. +-type supervisor_child_spec() :: { undefined, pid(), worker, [module()] }. +-type connection_info() :: ejabberd_s2s_in:connection_info() | ejabberd_s2s_out:connection_info(). %% @doc Get information about S2S connections of the specified type. --spec get_info_s2s_connections('in' | 'out') -> [[{atom(), any()}, ...]]. -get_info_s2s_connections(Type) -> - ChildType = case Type of - in -> ejabberd_s2s_in_sup; - out -> ejabberd_s2s_out_sup - end, - Connections = supervisor:which_children(ChildType), - get_s2s_info(Connections, Type). +-spec get_connections(direction()) -> [connection_info()]. +get_connections(Type) -> + Specs = supervisor:which_children(type_to_supervisor(Type)), + [Conn || Spec <- Specs, Conn <- get_state_info(child_to_pid(Spec))]. --spec get_s2s_info(Connections :: [conn()], - Type :: 'in' | 'out' - ) -> [[{any(), any()}, ...]]. % list of lists -get_s2s_info(Connections, Type)-> - complete_s2s_info(Connections, Type, []). +%% Both supervisors are simple_one_for_one with temporary children processes. +-spec type_to_supervisor(direction()) -> atom(). +type_to_supervisor(in) -> ejabberd_s2s_in_sup; +type_to_supervisor(out) -> ejabberd_s2s_out_sup. --spec complete_s2s_info(Connections :: [conn()], - Type :: 'in' | 'out', - Result :: [[{any(), any()}, ...]] % list of lists - ) -> [[{any(), any()}, ...]]. 
% list of lists -complete_s2s_info([], _, Result)-> - Result; -complete_s2s_info([Connection|T], Type, Result)-> - {_, PID, _, _} = Connection, - State = get_s2s_state(PID), - complete_s2s_info(T, Type, [State|Result]). +-spec child_to_pid(supervisor_child_spec()) -> pid(). +child_to_pid({_, Pid, _, _}) -> Pid. --spec get_s2s_state(connstate()) -> [{atom(), any()}, ...]. -get_s2s_state(S2sPid) -> - Infos = case gen_fsm_compat:sync_send_all_state_event(S2sPid, get_state_infos) of - {state_infos, Is} -> [{status, open} | Is]; - {noproc, _} -> [{status, closed}]; %% Connection closed - {badrpc, _} -> [{status, error}] - end, - [{s2s_pid, S2sPid} | Infos]. +-spec get_state_info(pid()) -> [connection_info()]. +get_state_info(Pid) when is_pid(Pid) -> + case gen_fsm_compat:sync_send_all_state_event(Pid, get_state_info) of + Info when is_map(Info) -> + [Info]; + Other -> + ?LOG_ERROR(#{what => s2s_get_state_info_failed, pid => Pid, reason => Other}), + [] + end. From 936e2b165c43e539c8c9d93011df5a00fe5a1b4b Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 20 Jul 2023 14:37:36 +0200 Subject: [PATCH 109/161] Fix style and language --- src/ejabberd_sup.erl | 1 - src/s2s/mongoose_s2s_lib.erl | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/ejabberd_sup.erl b/src/ejabberd_sup.erl index e2796588ab7..3295359b160 100644 --- a/src/ejabberd_sup.erl +++ b/src/ejabberd_sup.erl @@ -178,7 +178,6 @@ init([]) -> MucIQ, ShaperSup]}}. - start_child(ChildSpec) -> start_child(ejabberd_sup, ChildSpec). 
diff --git a/src/s2s/mongoose_s2s_lib.erl b/src/s2s/mongoose_s2s_lib.erl index 8892dcf304f..e1df46831e9 100644 --- a/src/s2s/mongoose_s2s_lib.erl +++ b/src/s2s/mongoose_s2s_lib.erl @@ -138,14 +138,14 @@ needed_extra_connections_number_if_allowed(FromTo, OldCons) -> %% Checks: %% - if the host is not a service -%% - and if the s2s host is not blacklisted or is in whitelist +%% - and host policy (allowlist or denylist) -spec is_s2s_allowed_for_host(fromto(), _OldConnections :: s2s_pids()) -> boolean(). is_s2s_allowed_for_host(_FromTo, [_|_]) -> true; %% Has outgoing connections established, skip the check is_s2s_allowed_for_host(FromTo, []) -> not is_service(FromTo) andalso allow_host(FromTo). -%% Check if host is in blacklist or white list +%% Checks if the s2s host is not in the denylist or is in the allowlist %% Runs a hook -spec allow_host(fromto()) -> boolean(). allow_host({FromServer, ToServer}) -> From 47af73845d35b6be4b63bbd11dd205d8d3e8e972 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 20 Jul 2023 15:31:39 +0200 Subject: [PATCH 110/161] Add record specs in mongoose_s2s_cets --- src/s2s/mongoose_s2s_cets.erl | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/s2s/mongoose_s2s_cets.erl b/src/s2s/mongoose_s2s_cets.erl index 536a4fc6dca..e97053f1551 100644 --- a/src/s2s/mongoose_s2s_cets.erl +++ b/src/s2s/mongoose_s2s_cets.erl @@ -18,6 +18,8 @@ -define(TABLE, cets_s2s_session). -define(SECRET_TABLE, cets_s2s_secret). +-type secret_tuple() :: {HostType :: mongooseim:host_type(), TS :: integer(), Secret :: ejabberd_s2s:base16_secret()}. + -spec init(map()) -> ok. init(_) -> cets:start(?TABLE, #{}), @@ -28,14 +30,16 @@ init(_) -> cets_discovery:add_table(mongoose_cets_discovery, ?SECRET_TABLE), ok. -%% Store the most recent value: -%% - first element of the tuple is the same and it is the key. -%% - second element is a timestamp, so comparing tuples works. +%% Chooses the most recent value of two. 
%% Even if we choose the wrong record - nothing bad would happen %% (we still need to choose one). %% Choosing the record with the highest timestamp is just a logical behaviour %% (it also matches the logic of mongoose_s2s_lib:check_shared_secret/2, where updated secret %% in the config is updated across all nodes in the cluster). +%% Example call: +%% handle_secret_conflict({<<"localhost">>, 1689858975612268, <<"4e48dc4898b23f512059">>}, +%% {<<"localhost">>, 1689859177195451, <<"56fdcd3ec63ff8299eb0">>}). +-spec handle_secret_conflict(secret_tuple(), secret_tuple()) -> secret_tuple(). handle_secret_conflict(Rec1, Rec2) when Rec1 > Rec2 -> Rec1; handle_secret_conflict(_Rec1, Rec2) -> From 1ee1ccc37d9636bbd74a1e6f90ec63a19ddc64db Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 20 Jul 2023 15:37:34 +0200 Subject: [PATCH 111/161] Move xeps into mongoose_s2s_dialback --- big_tests/tests/s2s_SUITE.erl | 4 ++-- src/ejabberd_s2s.erl | 2 -- src/s2s/mongoose_s2s_dialback.erl | 3 +++ 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/big_tests/tests/s2s_SUITE.erl b/big_tests/tests/s2s_SUITE.erl index f01ea060ddf..a7dd3798779 100644 --- a/big_tests/tests/s2s_SUITE.erl +++ b/big_tests/tests/s2s_SUITE.erl @@ -61,7 +61,7 @@ groups() -> {node1_tls_false_node2_tls_required, [], negative()}, {node1_tls_required_node2_tls_false, [], negative()}, - {dialback, [], [dialback_key_is_different_on_different_nodes]}]. + {dialback, [], [dialback_key_is_synchronized_on_different_nodes]}]. essentials() -> [simple_message]. @@ -443,7 +443,7 @@ get_main_file_path(Config, File) -> filename:join([path_helper:repo_dir(Config), "tools", "ssl", "mongooseim", File]). 
-dialback_key_is_different_on_different_nodes(_Config) -> +dialback_key_is_synchronized_on_different_nodes(_Config) -> configure_secret_and_restart_s2s(mim), configure_secret_and_restart_s2s(mim2), Key1 = get_shared_secret(mim), diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index f08607f8cbf..1bc0c324676 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -26,8 +26,6 @@ -module(ejabberd_s2s). -author('alexey@process-one.net'). --xep([{xep, 185}, {version, "1.0"}]). - -behaviour(gen_server). -behaviour(xmpp_router). diff --git a/src/s2s/mongoose_s2s_dialback.erl b/src/s2s/mongoose_s2s_dialback.erl index 2e579f2523e..ea522a9e730 100644 --- a/src/s2s/mongoose_s2s_dialback.erl +++ b/src/s2s/mongoose_s2s_dialback.erl @@ -46,6 +46,9 @@ -export([make_key/3]). +-xep([{xep, 185}, {version, "1.0"}]). %% Dialback Key Generation and Validation +-xep([{xep, 220}, {version, "1.1.1"}]). %% Server Dialback + -include("mongoose.hrl"). -include("jlib.hrl"). From 300b37c11d723e241e0df5b60d2e364af795cc3d Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 21 Jul 2023 12:43:13 +0200 Subject: [PATCH 112/161] Address minor review comments (code style) --- src/ejabberd_s2s_out.erl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index 12ba81804cc..0721449ce81 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -84,7 +84,7 @@ server :: jid:lserver(), queue :: element_queue(), host_type :: mongooseim:host_type(), - delay_to_retry = undefined :: undefined | non_neg_integer(), + delay_to_retry :: non_neg_integer() | undefined, is_registered = false :: boolean(), verify = false :: verify_requester(), timer :: reference() @@ -358,7 +358,9 @@ wait_for_validation({xmlstreamelement, El}, StateData = #state{from_to = FromTo} from_to => FromTo, stream_id => StreamID, is_valid => IsValid}), case StateData#state.verify of false -> - %% TODO: Should'nt we close the connection here 
?
+                    %% This is an unexpected condition.
+                    %% We've received a step_3 reply, but there is no matching outgoing connection.
+                    %% We could close the connection here.
                     next_state(wait_for_validation, StateData);
                 {Pid, _Key, _SID} ->
                     ejabberd_s2s_in:send_validity_from_s2s_out(Pid, IsValid, FromTo),

From 206722438a50d8bd2bcf282071bb51f877e32ea5 Mon Sep 17 00:00:00 2001
From: Mikhail Uvarov
Date: Fri, 30 Jun 2023 06:22:32 +0200
Subject: [PATCH 113/161] Change register_room output

---
 src/mod_muc.erl | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/src/mod_muc.erl b/src/mod_muc.erl
index 975ce2d2688..688290b092f 100644
--- a/src/mod_muc.erl
+++ b/src/mod_muc.erl
@@ -918,15 +918,17 @@ start_new_room(HostType, ServerHost, MucHost, Access, Room,
 
 register_room_or_stop_if_duplicate(HostType, MucHost, Room, Pid) ->
     case register_room(HostType, MucHost, Room, Pid) of
-        {_, ok} ->
+        ok ->
             {ok, Pid};
-        {_, {exists, OldPid}} ->
+        {exists, OldPid} ->
             mod_muc_room:stop(Pid),
-            {ok, OldPid}
+            {ok, OldPid};
+        {error, Reason} ->
+            error({failed_to_register, MucHost, Room, Pid, Reason})
     end.
 
 -spec register_room(HostType :: host_type(), jid:server(), room(),
-                    pid()) -> {'aborted', _} | {'atomic', _}.
+                    pid()) -> ok | {exists, pid()} | {error, term()}.
 register_room(HostType, MucHost, Room, Pid) ->
     F = fun() ->
                 case mnesia:read(muc_online_room, {Room, MucHost}, write) of
                     [] ->
                         mnesia:write(#muc_online_room{name_host = {Room, MucHost},
                                                       host_type = HostType,
                                                       pid = Pid});
                     [R] ->
                         {exists, R#muc_online_room.pid}
                 end
         end,
-    mnesia:transaction(F).
+    simple_transaction_result(mnesia:transaction(F)).
 
+simple_transaction_result({atomic, Res}) ->
+    Res;
+simple_transaction_result({aborted, Reason}) ->
+    {error, Reason}.
 
 -spec room_jid_to_pid(RoomJID :: jid:jid()) -> {ok, pid()} | {error, not_found}.
room_jid_to_pid(#jid{luser=RoomName, lserver=MucService}) -> From cad94b6d864de3a22be1db37b6b29e74dfcda19f Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 11 Jul 2023 15:40:06 +0200 Subject: [PATCH 114/161] Move muc_registered record into mod_muc_mnesia --- include/mod_muc.hrl | 5 ----- src/mod_muc.erl | 7 +------ src/mod_muc_mnesia.erl | 5 +++++ 3 files changed, 6 insertions(+), 11 deletions(-) diff --git a/include/mod_muc.hrl b/include/mod_muc.hrl index 45d5a6ade7b..ff8b2f99f5a 100644 --- a/include/mod_muc.hrl +++ b/include/mod_muc.hrl @@ -7,8 +7,3 @@ host_type, pid }). - --record(muc_registered, { - us_host, - nick - }). diff --git a/src/mod_muc.erl b/src/mod_muc.erl index 688290b092f..039e08f17f7 100644 --- a/src/mod_muc.erl +++ b/src/mod_muc.erl @@ -119,11 +119,6 @@ pid :: pid() }. --type muc_registered() :: #muc_registered{ - us_host :: jid:literal_jid(), - nick :: nick() - }. - -type room_event_data() :: #{ from_nick := nick(), from_jid := jid:jid(), @@ -146,7 +141,7 @@ -type state() :: #muc_state{}. --export_type([muc_room/0, muc_registered/0]). +-export_type([muc_room/0]). -define(PROCNAME, ejabberd_mod_muc). diff --git a/src/mod_muc_mnesia.erl b/src/mod_muc_mnesia.erl index 4e61bafe168..5fb4c134389 100644 --- a/src/mod_muc_mnesia.erl +++ b/src/mod_muc_mnesia.erl @@ -41,6 +41,11 @@ -include("jlib.hrl"). -include("mod_muc.hrl"). +-record(muc_registered, { + us_host :: {US :: jid:simple_bare_jid(), MucHost :: jid:lserver()} | '$1', + nick :: mod_muc:nick() + }). 
+ init(_HostType, _Opts) -> mnesia:create_table(muc_room, [{disc_copies, [node()]}, From 106e13f6e5fd9c7a4728e960a196dab9c261c3ba Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 11 Jul 2023 21:28:15 +0200 Subject: [PATCH 115/161] Add mongoose_muc_online_backend support Just one function in mongoose_muc_online_mnesia so far --- big_tests/test.config | 1 + big_tests/tests/domain_removal_SUITE.erl | 2 +- big_tests/tests/gdpr_SUITE.erl | 6 ++--- big_tests/tests/graphql_muc_SUITE.erl | 12 +++++----- big_tests/tests/graphql_muc_light_SUITE.erl | 6 ++--- big_tests/tests/inbox_SUITE.erl | 2 +- big_tests/tests/mam_SUITE.erl | 2 +- .../tests/mod_event_pusher_rabbit_SUITE.erl | 2 +- .../tests/mod_event_pusher_sns_SUITE.erl | 2 +- big_tests/tests/mod_global_distrib_SUITE.erl | 2 +- big_tests/tests/muc_SUITE.erl | 6 ++--- big_tests/tests/muc_helper.erl | 10 +++++--- big_tests/tests/muc_http_api_SUITE.erl | 2 +- src/mod_muc.erl | 10 ++++---- src/muc/mongoose_muc_online_backend.erl | 24 +++++++++++++++++++ src/muc/mongoose_muc_online_mnesia.erl | 18 ++++++++++++++ 16 files changed, 76 insertions(+), 31 deletions(-) create mode 100644 src/muc/mongoose_muc_online_backend.erl create mode 100644 src/muc/mongoose_muc_online_mnesia.erl diff --git a/big_tests/test.config b/big_tests/test.config index 2e8f86de60d..49e0664ec3a 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -244,6 +244,7 @@ {component_backend, "\"cets\""}, {s2s_backend, "\"cets\""}, {stream_management_backend, cets}, + {muc_backend, cets}, {auth_method, "rdbms"}, {internal_databases, "[internal_databases.cets] cluster_name = \"{{cluster_name}}\" diff --git a/big_tests/tests/domain_removal_SUITE.erl b/big_tests/tests/domain_removal_SUITE.erl index 579376bd6ac..953b3280f4a 100644 --- a/big_tests/tests/domain_removal_SUITE.erl +++ b/big_tests/tests/domain_removal_SUITE.erl @@ -162,7 +162,7 @@ is_internal_or_rdbms() -> %%%=================================================================== 
init_per_testcase(muc_removal, Config) -> - muc_helper:load_muc(), + muc_helper:load_muc(Config), mongoose_helper:ensure_muc_clean(), escalus:init_per_testcase(muc_removal, Config); init_per_testcase(roster_removal, ConfigIn) -> diff --git a/big_tests/tests/gdpr_SUITE.erl b/big_tests/tests/gdpr_SUITE.erl index 9174ed44eec..699f1e23fee 100644 --- a/big_tests/tests/gdpr_SUITE.erl +++ b/big_tests/tests/gdpr_SUITE.erl @@ -180,7 +180,7 @@ all_mam_testcases() -> init_per_suite(Config) -> #{node := MimNode} = distributed_helper:mim(), Config1 = [{{ejabberd_cwd, MimNode}, get_mim_cwd()} | dynamic_modules:save_modules(host_type(), Config)], - muc_helper:load_muc(), + muc_helper:load_muc(Config), escalus:init_per_suite(Config1). end_per_suite(Config) -> @@ -243,7 +243,7 @@ init_per_testcase(CN, Config) when Config1; init_per_testcase(CN, Config) when CN =:= retrieve_inbox_muc; CN =:= remove_inbox_muc -> - muc_helper:load_muc(), + muc_helper:load_muc(Config), Config0 = init_inbox(CN, Config, muc), Config0; @@ -316,7 +316,7 @@ end_per_testcase(CN, Config) when escalus:end_per_testcase(CN, Config); end_per_testcase(CN, Config) when CN =:= retrieve_inbox_muc; CN =:= remove_inbox_muc -> - muc_helper:unload_muc(), + muc_helper:unload_muc(Config), escalus:end_per_testcase(CN, Config); end_per_testcase(CN, Config) -> escalus_fresh:clean(), diff --git a/big_tests/tests/graphql_muc_SUITE.erl b/big_tests/tests/graphql_muc_SUITE.erl index dd968c3d6b4..2c0d6af8a3c 100644 --- a/big_tests/tests/graphql_muc_SUITE.erl +++ b/big_tests/tests/graphql_muc_SUITE.erl @@ -227,7 +227,7 @@ init_per_suite(Config) -> end_per_suite(Config) -> escalus_fresh:clean(), mongoose_helper:ensure_muc_clean(), - muc_helper:unload_muc(), + muc_helper:unload_muc(Config), dynamic_modules:restore_modules(Config), escalus:end_per_suite(Config). 
@@ -237,20 +237,20 @@ init_per_group(admin_cli, Config) -> graphql_helper:init_admin_cli(Config); init_per_group(domain_admin_muc, Config) -> maybe_enable_mam(), - ensure_muc_started(), + ensure_muc_started(Config), graphql_helper:init_domain_admin_handler(Config); init_per_group(user, Config) -> graphql_helper:init_user(Config); init_per_group(Group, Config) when Group =:= admin_muc_configured; Group =:= user_muc_configured -> disable_mam(), - ensure_muc_started(), + ensure_muc_started(Config), Config; init_per_group(Group, Config) when Group =:= admin_muc_and_mam_configured; Group =:= user_muc_and_mam_configured -> case maybe_enable_mam() of true -> - ensure_muc_started(), + ensure_muc_started(Config), ensure_muc_light_started(Config); false -> {skip, "No MAM backend available"} @@ -277,9 +277,9 @@ maybe_enable_mam() -> true end. -ensure_muc_started() -> +ensure_muc_started(Config) -> SecondaryHostType = domain_helper:secondary_host_type(), - muc_helper:load_muc(), + muc_helper:load_muc(Config), muc_helper:load_muc(SecondaryHostType), mongoose_helper:ensure_muc_clean(). diff --git a/big_tests/tests/graphql_muc_light_SUITE.erl b/big_tests/tests/graphql_muc_light_SUITE.erl index c9deff69444..872633016a5 100644 --- a/big_tests/tests/graphql_muc_light_SUITE.erl +++ b/big_tests/tests/graphql_muc_light_SUITE.erl @@ -237,7 +237,7 @@ init_per_group(Group, Config) when Group =:= user_muc_light_with_mam; Group =:= domain_admin_muc_light_with_mam -> case maybe_enable_mam() of true -> - ensure_muc_started(), + ensure_muc_started(Config), ensure_muc_light_started(Config); false -> {skip, "No MAM backend available"} @@ -281,8 +281,8 @@ ensure_muc_light_stopped(Config) -> dynamic_modules:ensure_modules(SecondaryHostType, [{mod_muc_light, stopped}]), [{muc_light_host, <<"NON_EXISTING">>} | Config]. -ensure_muc_started() -> - muc_helper:load_muc(), +ensure_muc_started(Config) -> + muc_helper:load_muc(Config), mongoose_helper:ensure_muc_clean(). 
ensure_muc_stopped() -> diff --git a/big_tests/tests/inbox_SUITE.erl b/big_tests/tests/inbox_SUITE.erl index 665831b3518..72fdbada0e3 100644 --- a/big_tests/tests/inbox_SUITE.erl +++ b/big_tests/tests/inbox_SUITE.erl @@ -186,7 +186,7 @@ init_per_group(muclight_config, Config) -> Config1 = inbox_helper:reload_inbox_option(Config, groupchat, [muclight]), escalus:create_users(Config1, escalus:get_users([alice, alice_bis, bob, kate, mike])); init_per_group(muc, Config) -> - muc_helper:load_muc(), + muc_helper:load_muc(Config), inbox_helper:reload_inbox_option(Config, groupchat, [muc]); init_per_group(limit_result, Config) -> OptKey = [{modules, domain_helper:host_type()}, mod_inbox, max_result_limit], diff --git a/big_tests/tests/mam_SUITE.erl b/big_tests/tests/mam_SUITE.erl index 6a55b0347dd..1ba9cc198be 100644 --- a/big_tests/tests/mam_SUITE.erl +++ b/big_tests/tests/mam_SUITE.erl @@ -540,7 +540,7 @@ suite() -> require_rpc_nodes([mim]) ++ escalus:suite(). init_per_suite(Config) -> - muc_helper:load_muc(), + muc_helper:load_muc(Config), mam_helper:prepare_for_suite( increase_limits( delete_users([{escalus_user_db, {module, escalus_ejabberd}} diff --git a/big_tests/tests/mod_event_pusher_rabbit_SUITE.erl b/big_tests/tests/mod_event_pusher_rabbit_SUITE.erl index 00f8fc5b94d..a4945aeea80 100644 --- a/big_tests/tests/mod_event_pusher_rabbit_SUITE.erl +++ b/big_tests/tests/mod_event_pusher_rabbit_SUITE.erl @@ -110,7 +110,7 @@ init_per_suite(Config) -> true -> start_rabbit_wpool(domain()), {ok, _} = application:ensure_all_started(amqp_client), - muc_helper:load_muc(), + muc_helper:load_muc(Config), escalus:init_per_suite(Config); false -> {skip, "RabbitMQ server is not available on default port."} diff --git a/big_tests/tests/mod_event_pusher_sns_SUITE.erl b/big_tests/tests/mod_event_pusher_sns_SUITE.erl index e3ab7be7d4e..217eeddd94e 100644 --- a/big_tests/tests/mod_event_pusher_sns_SUITE.erl +++ b/big_tests/tests/mod_event_pusher_sns_SUITE.erl @@ -60,7 +60,7 @@ 
init_per_suite(Config) -> %% For mocking with unnamed functions mongoose_helper:inject_module(?MODULE), - muc_helper:load_muc(), + muc_helper:load_muc(Config), escalus:init_per_suite(Config); {error, _} -> {skip, "erlcloud dependency is not enabled"} diff --git a/big_tests/tests/mod_global_distrib_SUITE.erl b/big_tests/tests/mod_global_distrib_SUITE.erl index 8423bf93372..88c4cb0503b 100644 --- a/big_tests/tests/mod_global_distrib_SUITE.erl +++ b/big_tests/tests/mod_global_distrib_SUITE.erl @@ -254,7 +254,7 @@ init_per_testcase(CaseName, Config) {_, EuropeHost, _} = lists:keyfind(europe_node1, 1, get_hosts()), trigger_rebalance(asia_node, EuropeHost), %% Load muc on mim node - muc_helper:load_muc(), + muc_helper:load_muc(Config), RegNode = ct:get_config({hosts, reg, node}), %% Wait for muc.localhost to become visible from reg node wait_for_domain(RegNode, muc_helper:muc_host()), diff --git a/big_tests/tests/muc_SUITE.erl b/big_tests/tests/muc_SUITE.erl index 5890e4288fd..738b8dc7cc3 100644 --- a/big_tests/tests/muc_SUITE.erl +++ b/big_tests/tests/muc_SUITE.erl @@ -29,8 +29,6 @@ -import(muc_helper, [muc_host/0, - load_muc/0, - unload_muc/0, start_room/5, generate_rpc_jid/1, destroy_room/1, @@ -331,14 +329,14 @@ init_per_suite(Config) -> Config2 = escalus:init_per_suite(Config), Config3 = dynamic_modules:save_modules(host_type(), Config2), dynamic_modules:restart(host_type(), mod_disco, default_mod_config(mod_disco)), - load_muc(), + muc_helper:load_muc(Config), mongoose_helper:ensure_muc_clean(), Config3. end_per_suite(Config) -> escalus_fresh:clean(), mongoose_helper:ensure_muc_clean(), - unload_muc(), + muc_helper:unload_muc(), dynamic_modules:restore_modules(Config), escalus:end_per_suite(Config). 
diff --git a/big_tests/tests/muc_helper.erl b/big_tests/tests/muc_helper.erl index bca13681e85..fed89e7484a 100644 --- a/big_tests/tests/muc_helper.erl +++ b/big_tests/tests/muc_helper.erl @@ -52,14 +52,15 @@ foreach_recipient(Users, VerifyFun) -> VerifyFun(escalus:wait_for_stanza(Recipient)) end, Users). -load_muc() -> - load_muc(domain_helper:host_type()). +load_muc(Config) -> + load_muc(Config, domain_helper:host_type()). -load_muc(HostType) -> +load_muc(Config, HostType) -> Backend = muc_backend(), MucHostPattern = ct:get_config({hosts, mim, muc_service_pattern}), ct:log("Starting MUC for ~p", [HostType]), Opts = #{host => subhost_pattern(MucHostPattern), backend => Backend, + online_backend => muc_online_backend(Config), hibernate_timeout => 2000, hibernated_room_check_interval => 1000, hibernated_room_timeout => 2000, @@ -86,6 +87,9 @@ muc_host_pattern() -> muc_backend() -> mongoose_helper:mnesia_or_rdbms_backend(). +muc_online_backend(Config) -> + ct_helper:get_preset_var(Config, muc_backend, mnesia). + start_room(Config, User, Room, Nick, Opts) -> From = generate_rpc_jid(User), create_instant_room(Room, From, Nick, Opts), diff --git a/big_tests/tests/muc_http_api_SUITE.erl b/big_tests/tests/muc_http_api_SUITE.erl index 2b49c7c51ac..5fd2eecec98 100644 --- a/big_tests/tests/muc_http_api_SUITE.erl +++ b/big_tests/tests/muc_http_api_SUITE.erl @@ -66,7 +66,7 @@ failure_response() -> %%-------------------------------------------------------------------- init_per_suite(Config) -> - muc_helper:load_muc(), + muc_helper:load_muc(Config), escalus:init_per_suite(Config). end_per_suite(Config) -> diff --git a/src/mod_muc.erl b/src/mod_muc.erl index 039e08f17f7..01aff7d9c50 100644 --- a/src/mod_muc.erl +++ b/src/mod_muc.erl @@ -161,6 +161,7 @@ start_link(HostType, Opts) -> -spec start(host_type(), _) -> ok. 
start(HostType, Opts) when is_map(Opts) -> + mongoose_muc_online_backend:start(HostType, Opts), ensure_metrics(HostType), start_supervisor(HostType), start_server(HostType, Opts), @@ -196,6 +197,8 @@ config_spec() -> #section{ items = #{<<"backend">> => #option{type = atom, validate = {module, mod_muc}}, + <<"online_backend">> => #option{type = atom, + validate = {module, mongoose_muc_online}}, <<"host">> => #option{type = string, validate = subdomain_template, process = fun mongoose_subdomain_utils:make_subdomain_pattern/1}, @@ -247,6 +250,7 @@ config_spec() -> defaults() -> #{<<"backend">> => mnesia, + <<"online_backend">> => mnesia, <<"host">> => default_host(), <<"access">> => all, <<"access_create">> => all, @@ -363,11 +367,7 @@ stop_gen_server(HostType) -> %% So the message sending must be catched -spec room_destroyed(host_type(), jid:server(), room(), pid()) -> 'ok'. room_destroyed(HostType, MucHost, Room, Pid) -> - Obj = #muc_online_room{name_host = {Room, MucHost}, - host_type = HostType, pid = Pid}, - F = fun() -> mnesia:delete_object(Obj) end, - {atomic, ok} = mnesia:transaction(F), - ok. + mongoose_muc_online_backend:room_destroyed(HostType, MucHost, Room, Pid). %% @doc Create a room. %% If Opts = default, the default room options are used. diff --git a/src/muc/mongoose_muc_online_backend.erl b/src/muc/mongoose_muc_online_backend.erl new file mode 100644 index 00000000000..ba3aa8df5b6 --- /dev/null +++ b/src/muc/mongoose_muc_online_backend.erl @@ -0,0 +1,24 @@ +-module(mongoose_muc_online_backend). + +-export([start/2, + room_destroyed/4]). + +-define(MAIN_MODULE, mongoose_muc_online). + +%% Callbacks + +%% API Functions + +-spec start(mongooseim:host_type(), gen_mod:module_opts()) -> any(). +start(HostType, Opts = #{online_backend := Backend}) -> + mongoose_backend:init(HostType, ?MAIN_MODULE, tracked_funs(), #{backend => Backend}), + mongoose_backend:call(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, [HostType, Opts]). + +-spec tracked_funs() -> atom(). 
+tracked_funs() -> + [room_destroyed]. + +-spec room_destroyed(mongooseim:host_type(), jid:server(), mod_muc:room(), pid()) -> ok. +room_destroyed(HostType, MucHost, Room, Pid) -> + Args = [HostType, MucHost, Room, Pid], + mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). diff --git a/src/muc/mongoose_muc_online_mnesia.erl b/src/muc/mongoose_muc_online_mnesia.erl new file mode 100644 index 00000000000..84261a8955f --- /dev/null +++ b/src/muc/mongoose_muc_online_mnesia.erl @@ -0,0 +1,18 @@ +-module(mongoose_muc_online_mnesia). +-export([start/2, + room_destroyed/4]). + +-include_lib("mod_muc.hrl"). + +start(HostType, Opts) -> + ok. + +%% Race condition is possible between register and room_destroyed +%% (Because register is outside of the room process) +-spec room_destroyed(mongooseim:host_type(), jid:server(), mod_muc:room(), pid()) -> ok. +room_destroyed(HostType, MucHost, Room, Pid) -> + Obj = #muc_online_room{name_host = {Room, MucHost}, + host_type = HostType, pid = Pid}, + F = fun() -> mnesia:delete_object(Obj) end, + {atomic, ok} = mnesia:transaction(F), + ok. From 5efe81399493c3889eda4b0997c07cd61d152e9a Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 12 Jul 2023 12:01:56 +0200 Subject: [PATCH 116/161] Move mnesia:create_table into the backend module --- src/mod_muc.erl | 4 ---- src/muc/mongoose_muc_online_mnesia.erl | 6 +++++- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/mod_muc.erl b/src/mod_muc.erl index 01aff7d9c50..ab20313e218 100644 --- a/src/mod_muc.erl +++ b/src/mod_muc.erl @@ -444,10 +444,6 @@ get_nick(HostType, MucHost, From) -> -spec init({host_type(), map()}) -> {ok, state()}. 
init({HostType, Opts}) -> mod_muc_backend:init(HostType, Opts), - mnesia:create_table(muc_online_room, - [{ram_copies, [node()]}, - {attributes, record_info(fields, muc_online_room)}]), - mnesia:add_table_copy(muc_online_room, node(), ram_copies), catch ets:new(muc_online_users, [bag, named_table, public, {keypos, 2}]), clean_table_from_bad_node(node(), HostType), mnesia:subscribe(system), diff --git a/src/muc/mongoose_muc_online_mnesia.erl b/src/muc/mongoose_muc_online_mnesia.erl index 84261a8955f..561a7d61920 100644 --- a/src/muc/mongoose_muc_online_mnesia.erl +++ b/src/muc/mongoose_muc_online_mnesia.erl @@ -4,7 +4,11 @@ -include_lib("mod_muc.hrl"). -start(HostType, Opts) -> +start(_HostType, _Opts) -> + mnesia:create_table(muc_online_room, + [{ram_copies, [node()]}, + {attributes, record_info(fields, muc_online_room)}]), + mnesia:add_table_copy(muc_online_room, node(), ram_copies), ok. %% Race condition is possible between register and room_destroyed From 566ba24a637a41e2a8c4e2ad791854557fdd1c2b Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 20 Jul 2023 19:42:58 +0200 Subject: [PATCH 117/161] Move register_room into mongoose_muc_online_mnesia --- src/mod_muc.erl | 17 +---------------- src/muc/mongoose_muc_online_backend.erl | 8 +++++++- src/muc/mongoose_muc_online_mnesia.erl | 20 ++++++++++++++++++++ 3 files changed, 28 insertions(+), 17 deletions(-) diff --git a/src/mod_muc.erl b/src/mod_muc.erl index ab20313e218..4416651b213 100644 --- a/src/mod_muc.erl +++ b/src/mod_muc.erl @@ -921,22 +921,7 @@ register_room_or_stop_if_duplicate(HostType, MucHost, Room, Pid) -> -spec register_room(HostType :: host_type(), jid:server(), room(), pid()) -> ok | {exists, pid()} | {error, term()}. 
register_room(HostType, MucHost, Room, Pid) -> - F = fun() -> - case mnesia:read(muc_online_room, {Room, MucHost}, write) of - [] -> - mnesia:write(#muc_online_room{name_host = {Room, MucHost}, - host_type = HostType, - pid = Pid}); - [R] -> - {exists, R#muc_online_room.pid} - end - end, - simple_transaction_result(mnesia:transaction(F)). - -simple_transaction_result({atomic, Res}) -> - Res; -simple_transaction_result({aborted, Reason}) -> - {error, Reason}. + mongoose_muc_online_backend:register_room(HostType, MucHost, Room, Pid). -spec room_jid_to_pid(RoomJID :: jid:jid()) -> {ok, pid()} | {error, not_found}. room_jid_to_pid(#jid{luser=RoomName, lserver=MucService}) -> diff --git a/src/muc/mongoose_muc_online_backend.erl b/src/muc/mongoose_muc_online_backend.erl index ba3aa8df5b6..ed714031fdb 100644 --- a/src/muc/mongoose_muc_online_backend.erl +++ b/src/muc/mongoose_muc_online_backend.erl @@ -1,6 +1,7 @@ -module(mongoose_muc_online_backend). -export([start/2, + register_room/4, room_destroyed/4]). -define(MAIN_MODULE, mongoose_muc_online). @@ -16,7 +17,12 @@ start(HostType, Opts = #{online_backend := Backend}) -> -spec tracked_funs() -> atom(). tracked_funs() -> - [room_destroyed]. + [register_room, + room_destroyed]. + +register_room(HostType, MucHost, Room, Pid) -> + Args = [HostType, MucHost, Room, Pid], + mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). -spec room_destroyed(mongooseim:host_type(), jid:server(), mod_muc:room(), pid()) -> ok. room_destroyed(HostType, MucHost, Room, Pid) -> diff --git a/src/muc/mongoose_muc_online_mnesia.erl b/src/muc/mongoose_muc_online_mnesia.erl index 561a7d61920..3b722b35172 100644 --- a/src/muc/mongoose_muc_online_mnesia.erl +++ b/src/muc/mongoose_muc_online_mnesia.erl @@ -1,5 +1,6 @@ -module(mongoose_muc_online_mnesia). -export([start/2, + register_room/4, room_destroyed/4]). -include_lib("mod_muc.hrl"). 
@@ -11,6 +12,19 @@ start(_HostType, _Opts) -> mnesia:add_table_copy(muc_online_room, node(), ram_copies), ok. +register_room(HostType, MucHost, Room, Pid) -> + F = fun() -> + case mnesia:read(muc_online_room, {Room, MucHost}, write) of + [] -> + mnesia:write(#muc_online_room{name_host = {Room, MucHost}, + host_type = HostType, + pid = Pid}); + [R] -> + {exists, R#muc_online_room.pid} + end + end, + simple_transaction_result(mnesia:transaction(F)). + %% Race condition is possible between register and room_destroyed %% (Because register is outside of the room process) -spec room_destroyed(mongooseim:host_type(), jid:server(), mod_muc:room(), pid()) -> ok. @@ -20,3 +34,9 @@ room_destroyed(HostType, MucHost, Room, Pid) -> F = fun() -> mnesia:delete_object(Obj) end, {atomic, ok} = mnesia:transaction(F), ok. + + +simple_transaction_result({atomic, Res}) -> + Res; +simple_transaction_result({aborted, Reason}) -> + {error, Reason}. From e83128c257f22635809d12a54a01997e0aee1045 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 20 Jul 2023 19:59:12 +0200 Subject: [PATCH 118/161] Move find_room_pid into mongoose_muc_online_mnesia --- src/mod_muc.erl | 21 ++++++++++----------- src/muc/mongoose_muc_online_backend.erl | 7 ++++++- src/muc/mongoose_muc_online_mnesia.erl | 12 ++++++++++-- 3 files changed, 26 insertions(+), 14 deletions(-) diff --git a/src/mod_muc.erl b/src/mod_muc.erl index 4416651b213..6d6665de30b 100644 --- a/src/mod_muc.erl +++ b/src/mod_muc.erl @@ -646,16 +646,16 @@ route_to_room(_MucHost, <<>>, {_, To, _Acc, _} = Routed, State) -> {_, _, Nick} = jid:to_lower(To), route_by_nick(Nick, Routed, State); route_to_room(MucHost, Room, Routed, #muc_state{} = State) -> - case mnesia:dirty_read(muc_online_room, {Room, MucHost}) of - [] -> + HostType = State#muc_state.host_type, + case find_room_pid(HostType, MucHost, Room) of + {error, not_found} -> case get_registered_room_or_route_error(MucHost, Room, Routed, State) of {ok, Pid} -> route_to_online_room(Pid, 
Routed); {route_error, _ErrText} -> ok end; - [R] -> - Pid = R#muc_online_room.pid, + {ok, Pid} -> route_to_online_room(Pid, Routed) end. @@ -924,13 +924,12 @@ register_room(HostType, MucHost, Room, Pid) -> mongoose_muc_online_backend:register_room(HostType, MucHost, Room, Pid). -spec room_jid_to_pid(RoomJID :: jid:jid()) -> {ok, pid()} | {error, not_found}. -room_jid_to_pid(#jid{luser=RoomName, lserver=MucService}) -> - case mnesia:dirty_read(muc_online_room, {RoomName, MucService}) of - [R] -> - {ok, R#muc_online_room.pid}; - [] -> - {error, not_found} - end. +room_jid_to_pid(#jid{luser = Room, lserver = MucHost}) -> + {ok, HostType} = mongoose_domain_api:get_subdomain_host_type(MucHost), + find_room_pid(HostType, MucHost, Room). + +find_room_pid(HostType, MucHost, Room) -> + mongoose_muc_online_backend:find_room_pid(HostType, MucHost, Room). -spec default_host() -> mongoose_subdomain_utils:subdomain_pattern(). default_host() -> diff --git a/src/muc/mongoose_muc_online_backend.erl b/src/muc/mongoose_muc_online_backend.erl index ed714031fdb..c450a588544 100644 --- a/src/muc/mongoose_muc_online_backend.erl +++ b/src/muc/mongoose_muc_online_backend.erl @@ -2,7 +2,8 @@ -export([start/2, register_room/4, - room_destroyed/4]). + room_destroyed/4, + find_room_pid/3]). -define(MAIN_MODULE, mongoose_muc_online). @@ -28,3 +29,7 @@ register_room(HostType, MucHost, Room, Pid) -> room_destroyed(HostType, MucHost, Room, Pid) -> Args = [HostType, MucHost, Room, Pid], mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +find_room_pid(HostType, MucHost, Room) -> + Args = [HostType, MucHost, Room], + mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). diff --git a/src/muc/mongoose_muc_online_mnesia.erl b/src/muc/mongoose_muc_online_mnesia.erl index 3b722b35172..921fa2940f0 100644 --- a/src/muc/mongoose_muc_online_mnesia.erl +++ b/src/muc/mongoose_muc_online_mnesia.erl @@ -1,7 +1,8 @@ -module(mongoose_muc_online_mnesia). 
-export([start/2, register_room/4, - room_destroyed/4]). + room_destroyed/4, + find_room_pid/3]). -include_lib("mod_muc.hrl"). @@ -35,8 +36,15 @@ room_destroyed(HostType, MucHost, Room, Pid) -> {atomic, ok} = mnesia:transaction(F), ok. - simple_transaction_result({atomic, Res}) -> Res; simple_transaction_result({aborted, Reason}) -> {error, Reason}. + +find_room_pid(HostType, MucHost, Room) -> + case mnesia:dirty_read(muc_online_room, {Room, MucHost}) of + [R] -> + {ok, R#muc_online_room.pid}; + [] -> + {error, not_found} + end. From 20864d1b00511471c8376c0afaebcef2dd9df766 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 20 Jul 2023 23:11:15 +0200 Subject: [PATCH 119/161] Move get_online_rooms function --- src/mod_muc.erl | 6 ++---- src/muc/mongoose_muc_online_backend.erl | 7 ++++++- src/muc/mongoose_muc_online_mnesia.erl | 11 +++++++++-- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/src/mod_muc.erl b/src/mod_muc.erl index 6d6665de30b..46ec06e24b3 100644 --- a/src/mod_muc.erl +++ b/src/mod_muc.erl @@ -1164,10 +1164,8 @@ broadcast_service_message(MucHost, Msg) -> -spec get_vh_rooms(muc_host()) -> [muc_online_room()]. get_vh_rooms(MucHost) -> - mnesia:dirty_select(muc_online_room, - [{#muc_online_room{name_host = '$1', _ = '_'}, - [{'==', {element, 2, '$1'}, MucHost}], - ['$_']}]). + {ok, HostType} = mongoose_domain_api:get_subdomain_host_type(MucHost), + mongoose_muc_online_backend:get_online_rooms(HostType, MucHost). -spec get_persistent_vh_rooms(muc_host()) -> [muc_room()]. get_persistent_vh_rooms(MucHost) -> diff --git a/src/muc/mongoose_muc_online_backend.erl b/src/muc/mongoose_muc_online_backend.erl index c450a588544..48d23b3bef3 100644 --- a/src/muc/mongoose_muc_online_backend.erl +++ b/src/muc/mongoose_muc_online_backend.erl @@ -3,7 +3,8 @@ -export([start/2, register_room/4, room_destroyed/4, - find_room_pid/3]). + find_room_pid/3, + get_online_rooms/2]). -define(MAIN_MODULE, mongoose_muc_online). 
@@ -33,3 +34,7 @@ room_destroyed(HostType, MucHost, Room, Pid) -> find_room_pid(HostType, MucHost, Room) -> Args = [HostType, MucHost, Room], mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +get_online_rooms(HostType, MucHost) -> + Args = [HostType, MucHost], + mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). diff --git a/src/muc/mongoose_muc_online_mnesia.erl b/src/muc/mongoose_muc_online_mnesia.erl index 921fa2940f0..38b19992af0 100644 --- a/src/muc/mongoose_muc_online_mnesia.erl +++ b/src/muc/mongoose_muc_online_mnesia.erl @@ -2,7 +2,8 @@ -export([start/2, register_room/4, room_destroyed/4, - find_room_pid/3]). + find_room_pid/3, + get_online_rooms/2]). -include_lib("mod_muc.hrl"). @@ -41,10 +42,16 @@ simple_transaction_result({atomic, Res}) -> simple_transaction_result({aborted, Reason}) -> {error, Reason}. -find_room_pid(HostType, MucHost, Room) -> +find_room_pid(_HostType, MucHost, Room) -> case mnesia:dirty_read(muc_online_room, {Room, MucHost}) of [R] -> {ok, R#muc_online_room.pid}; [] -> {error, not_found} end. + +get_online_rooms(_HostType, MucHost) -> + mnesia:dirty_select(muc_online_room, + [{#muc_online_room{name_host = '$1', _ = '_'}, + [{'==', {element, 2, '$1'}, MucHost}], + ['$_']}]). From 4bb0321518a4c4aced6acfff8d003aed708e2114 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 20 Jul 2023 23:13:56 +0200 Subject: [PATCH 120/161] Remove clean_table_from_bad_node/1 --- src/mod_muc.erl | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/src/mod_muc.erl b/src/mod_muc.erl index 46ec06e24b3..2651788e730 100644 --- a/src/mod_muc.erl +++ b/src/mod_muc.erl @@ -536,8 +536,8 @@ handle_call({create_instant, ServerHost, MucHost, Room, From, Nick, Opts}, handle_cast(_Msg, State) -> {noreply, State}. 
-handle_info({mnesia_system_event, {mnesia_down, Node}}, State) -> - clean_table_from_bad_node(Node), +handle_info({mnesia_system_event, {mnesia_down, Node}}, #muc_state{host_type = HostType} = State) -> + clean_table_from_bad_node(Node, HostType), {noreply, State}; handle_info(stop_hibernated_persistent_rooms, #muc_state{host_type = HostType, @@ -1177,21 +1177,6 @@ get_persistent_vh_rooms(MucHost) -> [] end. --spec clean_table_from_bad_node(node()) -> any(). -clean_table_from_bad_node(Node) -> - F = fun() -> - Es = mnesia:select( - muc_online_room, - [{#muc_online_room{pid = '$1', _ = '_'}, - [{'==', {node, '$1'}, Node}], - ['$_']}]), - lists:foreach(fun(E) -> - mnesia:delete_object(E) - end, Es) - end, - mnesia:async_dirty(F). - - -spec clean_table_from_bad_node(node(), host_type()) -> any(). clean_table_from_bad_node(Node, HostType) -> F = fun() -> From 077582c11b146ffcff2f9c1ed7e220cb25b3b1b9 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 20 Jul 2023 23:17:59 +0200 Subject: [PATCH 121/161] Move node_cleanup into mongoose_muc_online_mnesia --- src/mod_muc.erl | 22 +++++----------------- src/muc/mongoose_muc_online_backend.erl | 10 ++++++++-- src/muc/mongoose_muc_online_mnesia.erl | 19 ++++++++++++++++++- 3 files changed, 31 insertions(+), 20 deletions(-) diff --git a/src/mod_muc.erl b/src/mod_muc.erl index 2651788e730..5e5a1ee64cc 100644 --- a/src/mod_muc.erl +++ b/src/mod_muc.erl @@ -445,7 +445,7 @@ get_nick(HostType, MucHost, From) -> init({HostType, Opts}) -> mod_muc_backend:init(HostType, Opts), catch ets:new(muc_online_users, [bag, named_table, public, {keypos, 2}]), - clean_table_from_bad_node(node(), HostType), + node_cleanup(HostType, node()), mnesia:subscribe(system), #{access := Access, access_create := AccessCreate, @@ -537,7 +537,7 @@ handle_cast(_Msg, State) -> {noreply, State}. 
handle_info({mnesia_system_event, {mnesia_down, Node}}, #muc_state{host_type = HostType} = State) -> - clean_table_from_bad_node(Node, HostType), + node_cleanup(HostType, Node), {noreply, State}; handle_info(stop_hibernated_persistent_rooms, #muc_state{host_type = HostType, @@ -1177,21 +1177,9 @@ get_persistent_vh_rooms(MucHost) -> [] end. --spec clean_table_from_bad_node(node(), host_type()) -> any(). -clean_table_from_bad_node(Node, HostType) -> - F = fun() -> - Es = mnesia:select( - muc_online_room, - [{#muc_online_room{pid = '$1', - host_type = HostType, - _ = '_'}, - [{'==', {node, '$1'}, Node}], - ['$_']}]), - lists:foreach(fun(E) -> - mnesia:delete_object(E) - end, Es) - end, - mnesia:async_dirty(F). +-spec node_cleanup(host_type(), node()) -> ok. +node_cleanup(HostType, Node) -> + mongoose_muc_online_backend:node_cleanup(HostType, Node). %%==================================================================== %% Hooks handlers diff --git a/src/muc/mongoose_muc_online_backend.erl b/src/muc/mongoose_muc_online_backend.erl index 48d23b3bef3..98012230596 100644 --- a/src/muc/mongoose_muc_online_backend.erl +++ b/src/muc/mongoose_muc_online_backend.erl @@ -4,7 +4,8 @@ register_room/4, room_destroyed/4, find_room_pid/3, - get_online_rooms/2]). + get_online_rooms/2, + node_cleanup/2]). -define(MAIN_MODULE, mongoose_muc_online). @@ -20,7 +21,8 @@ start(HostType, Opts = #{online_backend := Backend}) -> -spec tracked_funs() -> atom(). tracked_funs() -> [register_room, - room_destroyed]. + room_destroyed, + get_online_rooms]. register_room(HostType, MucHost, Room, Pid) -> Args = [HostType, MucHost, Room, Pid], @@ -38,3 +40,7 @@ find_room_pid(HostType, MucHost, Room) -> get_online_rooms(HostType, MucHost) -> Args = [HostType, MucHost], mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +node_cleanup(HostType, Node) -> + Args = [HostType, Node], + mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). 
diff --git a/src/muc/mongoose_muc_online_mnesia.erl b/src/muc/mongoose_muc_online_mnesia.erl index 38b19992af0..53d9b6431d7 100644 --- a/src/muc/mongoose_muc_online_mnesia.erl +++ b/src/muc/mongoose_muc_online_mnesia.erl @@ -3,7 +3,8 @@ register_room/4, room_destroyed/4, find_room_pid/3, - get_online_rooms/2]). + get_online_rooms/2, + node_cleanup/2]). -include_lib("mod_muc.hrl"). @@ -55,3 +56,19 @@ get_online_rooms(_HostType, MucHost) -> [{#muc_online_room{name_host = '$1', _ = '_'}, [{'==', {element, 2, '$1'}, MucHost}], ['$_']}]). + +node_cleanup(HostType, Node) -> + F = fun() -> + Es = mnesia:select( + muc_online_room, + [{#muc_online_room{pid = '$1', + host_type = HostType, + _ = '_'}, + [{'==', {node, '$1'}, Node}], + ['$_']}]), + lists:foreach(fun(E) -> + mnesia:delete_object(E) + end, Es) + end, + mnesia:async_dirty(F), + ok. From a166de595a3ea8eefc88b2d78d932656ec0b6067 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 20 Jul 2023 23:38:54 +0200 Subject: [PATCH 122/161] Run node_cleanup for each host type --- src/hooks/mongoose_hooks.erl | 10 ++++++++-- src/mod_muc.erl | 18 ++++++++++++------ src/mongoose_cleaner.erl | 6 +++++- src/muc/mongoose_muc_online_backend.erl | 2 +- 4 files changed, 26 insertions(+), 10 deletions(-) diff --git a/src/hooks/mongoose_hooks.erl b/src/hooks/mongoose_hooks.erl index 04991ce1e40..aca2b7275ce 100644 --- a/src/hooks/mongoose_hooks.erl +++ b/src/hooks/mongoose_hooks.erl @@ -138,9 +138,10 @@ mod_global_distrib_unknown_recipient/2]). -export([remove_domain/2, - node_cleanup/1]). + node_cleanup/1, + node_cleanup_for_host_type/2]). --ignore_xref([node_cleanup/1, remove_domain/2]). +-ignore_xref([remove_domain/2]). -ignore_xref([mam_archive_sync/1, mam_muc_archive_sync/1]). %% Just a map, used by some hooks as a first argument. @@ -217,6 +218,11 @@ node_cleanup(Node) -> Params = #{node => Node}, run_global_hook(node_cleanup, #{}, Params). 
+-spec node_cleanup_for_host_type(HostType :: mongooseim:host_type(), Node :: node()) -> Acc :: map(). +node_cleanup_for_host_type(HostType, Node) -> + Params = #{node => Node}, + run_hook_for_host_type(node_cleanup_for_host_type, HostType, #{}, Params). + -spec failed_to_store_message(Acc) -> Result when Acc :: mongoose_acc:t(), Result :: mongoose_acc:t(). diff --git a/src/mod_muc.erl b/src/mod_muc.erl index 5e5a1ee64cc..a5f5e383733 100644 --- a/src/mod_muc.erl +++ b/src/mod_muc.erl @@ -66,7 +66,8 @@ remove_domain/3, acc_room_affiliations/3, can_access_identity/3, - disco_local_items/3]). + disco_local_items/3, + node_cleanup_for_host_type/3]). %% Stats -export([online_rooms_number/0]). @@ -446,7 +447,6 @@ init({HostType, Opts}) -> mod_muc_backend:init(HostType, Opts), catch ets:new(muc_online_users, [bag, named_table, public, {keypos, 2}]), node_cleanup(HostType, node()), - mnesia:subscribe(system), #{access := Access, access_create := AccessCreate, access_admin := AccessAdmin, @@ -536,9 +536,6 @@ handle_call({create_instant, ServerHost, MucHost, Room, From, Nick, Opts}, handle_cast(_Msg, State) -> {noreply, State}. -handle_info({mnesia_system_event, {mnesia_down, Node}}, #muc_state{host_type = HostType} = State) -> - node_cleanup(HostType, Node), - {noreply, State}; handle_info(stop_hibernated_persistent_rooms, #muc_state{host_type = HostType, hibernated_room_timeout = Timeout} = State) @@ -1261,6 +1258,14 @@ disco_local_items(Acc = #{host_type := HostType, disco_local_items(Acc, _, _) -> {ok, Acc}. +-spec node_cleanup_for_host_type(Acc, Params, Extra) -> {ok, Acc} when + Acc :: mongoose_disco:item_acc(), + Params :: map(), + Extra :: gen_hook:extra(). +node_cleanup_for_host_type(Acc, #{node := Node}, #{host_type := HostType}) -> + node_cleanup(HostType, Node), + Acc. + online_rooms_number() -> lists:sum([online_rooms_number(HostType) || HostType <- gen_mod:hosts_with_module(?MODULE)]). 
@@ -1324,7 +1329,8 @@ hooks(HostType) -> {remove_domain, HostType, fun ?MODULE:remove_domain/3, #{}, 50}, {acc_room_affiliations, HostType, fun ?MODULE:acc_room_affiliations/3, #{}, 50}, {can_access_identity, HostType, fun ?MODULE:can_access_identity/3, #{}, 50}, - {disco_local_items, HostType, fun ?MODULE:disco_local_items/3, #{}, 250}]. + {disco_local_items, HostType, fun ?MODULE:disco_local_items/3, #{}, 250}, + {node_cleanup_for_host_type, HostType, fun ?MODULE:node_cleanup_for_host_type/3, #{}, 50}]. subdomain_pattern(HostType) -> gen_mod:get_module_opt(HostType, ?MODULE, host). diff --git a/src/mongoose_cleaner.erl b/src/mongoose_cleaner.erl index ec8f5b1823c..388937beaaa 100644 --- a/src/mongoose_cleaner.erl +++ b/src/mongoose_cleaner.erl @@ -87,7 +87,11 @@ cleanup_modules(Node) -> end. run_node_cleanup(Node) -> - {Elapsed, RetVal} = timer:tc(mongoose_hooks, node_cleanup, [Node]), + {Elapsed, RetVal} = timer:tc(fun() -> + mongoose_hooks:node_cleanup(Node), + [mongoose_hooks:node_cleanup_for_host_type(HostType, Node) || HostType <- ?ALL_HOST_TYPES], + ok + end), ?LOG_NOTICE(#{what => cleaner_done, text => <<"Finished cleaning after dead node">>, duration => erlang:round(Elapsed / 1000), diff --git a/src/muc/mongoose_muc_online_backend.erl b/src/muc/mongoose_muc_online_backend.erl index 98012230596..a4d6c3c1c70 100644 --- a/src/muc/mongoose_muc_online_backend.erl +++ b/src/muc/mongoose_muc_online_backend.erl @@ -18,7 +18,7 @@ start(HostType, Opts = #{online_backend := Backend}) -> mongoose_backend:init(HostType, ?MAIN_MODULE, tracked_funs(), #{backend => Backend}), mongoose_backend:call(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, [HostType, Opts]). --spec tracked_funs() -> atom(). +-spec tracked_funs() -> [atom()]. 
tracked_funs() -> [register_room, room_destroyed, From 5e41c46608d8eab369018f7f37aebb6d64b6d516 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 20 Jul 2023 23:44:43 +0200 Subject: [PATCH 123/161] Don't execute node_cleanup in MUC on start Because we don't really need to --- src/mod_muc.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/mod_muc.erl b/src/mod_muc.erl index a5f5e383733..85ded20dcf1 100644 --- a/src/mod_muc.erl +++ b/src/mod_muc.erl @@ -446,7 +446,6 @@ get_nick(HostType, MucHost, From) -> init({HostType, Opts}) -> mod_muc_backend:init(HostType, Opts), catch ets:new(muc_online_users, [bag, named_table, public, {keypos, 2}]), - node_cleanup(HostType, node()), #{access := Access, access_create := AccessCreate, access_admin := AccessAdmin, From 1934ca0a6c504390ad9bdce4e64ee4506480d8b3 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 20 Jul 2023 23:51:50 +0200 Subject: [PATCH 124/161] Add mongoose_cleanup_SUITE:cleaner_runs_hook_on_nodedown_for_host_type testcase --- test/mongoose_cleanup_SUITE.erl | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/test/mongoose_cleanup_SUITE.erl b/test/mongoose_cleanup_SUITE.erl index acf513b4791..b70c96cac3d 100644 --- a/test/mongoose_cleanup_SUITE.erl +++ b/test/mongoose_cleanup_SUITE.erl @@ -7,7 +7,10 @@ init_per_suite/1, end_per_suite/1, init_per_group/2, end_per_group/2, init_per_testcase/2, end_per_testcase/2]). --export([cleaner_runs_hook_on_nodedown/1, notify_self_hook/3]). +-export([cleaner_runs_hook_on_nodedown/1, + cleaner_runs_hook_on_nodedown_for_host_type/1, + notify_self_hook/3, + notify_self_hook_for_host_type/3]). 
-export([auth_anonymous/1, last/1, stream_management/1, @@ -27,6 +30,7 @@ all() -> [ cleaner_runs_hook_on_nodedown, + cleaner_runs_hook_on_nodedown_for_host_type, auth_anonymous, last, stream_management, @@ -126,10 +130,8 @@ cleaner_runs_hook_on_nodedown(_Config) -> gen_hook:add_handler(node_cleanup, global, fun ?MODULE:notify_self_hook/3, #{self => self()}, 50), - FakeNode = fakename@fakehost, Cleaner ! {nodedown, FakeNode}, - receive {got_nodedown, FakeNode} -> ok after timer:seconds(1) -> @@ -138,10 +140,28 @@ cleaner_runs_hook_on_nodedown(_Config) -> ?assertEqual(false, meck:called(gen_hook, error_running_hook, ['_', '_', '_', '_', '_'])). +cleaner_runs_hook_on_nodedown_for_host_type(_Config) -> + HostType = ?HOST, + {ok, Cleaner} = mongoose_cleaner:start_link(), + gen_hook:add_handler(node_cleanup_for_host_type, HostType, + fun ?MODULE:notify_self_hook_for_host_type/3, + #{self => self()}, 50), + FakeNode = fakename@fakehost, + Cleaner ! {nodedown, FakeNode}, + receive + {got_nodedown_for_host_type, FakeNode, HostType} -> ok + after timer:seconds(1) -> + ct:fail({timeout, got_nodedown}) + end. + notify_self_hook(Acc, #{node := Node}, #{self := Self}) -> Self ! {got_nodedown, Node}, {ok, Acc}. +notify_self_hook_for_host_type(Acc, #{node := Node}, #{self := Self, host_type := HostType}) -> + Self ! {got_nodedown_for_host_type, Node, HostType}, + {ok, Acc}. 
+ auth_anonymous(_Config) -> HostType = ?HOST, {U, S, R, JID, SID} = get_fake_session(), From 7d0da400c841a373c528d70212b335533ec70e97 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 21 Jul 2023 14:39:42 +0200 Subject: [PATCH 125/161] Define default online_backend in tests --- test/common/config_parser_helper.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/test/common/config_parser_helper.erl b/test/common/config_parser_helper.erl index 527eba36bbc..e11da3c0c98 100644 --- a/test/common/config_parser_helper.erl +++ b/test/common/config_parser_helper.erl @@ -905,6 +905,7 @@ default_mod_config(mod_mam_muc_rdbms_arch) -> db_jid_format => mam_jid_rfc}; default_mod_config(mod_muc) -> #{backend => mnesia, + online_backend => mnesia, host => {prefix,<<"conference.">>}, access => all, access_create => all, From e1d20375a014b9a59c10f9a42e63bdc6942ed74c Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 21 Jul 2023 15:20:41 +0200 Subject: [PATCH 126/161] Fix graphql_muc_SUITE tests --- big_tests/tests/graphql_muc_SUITE.erl | 2 +- big_tests/tests/muc_helper.erl | 2 +- src/mod_muc.erl | 8 ++++++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/big_tests/tests/graphql_muc_SUITE.erl b/big_tests/tests/graphql_muc_SUITE.erl index 2c0d6af8a3c..08a9782a563 100644 --- a/big_tests/tests/graphql_muc_SUITE.erl +++ b/big_tests/tests/graphql_muc_SUITE.erl @@ -280,7 +280,7 @@ maybe_enable_mam() -> ensure_muc_started(Config) -> SecondaryHostType = domain_helper:secondary_host_type(), muc_helper:load_muc(Config), - muc_helper:load_muc(SecondaryHostType), + muc_helper:load_muc(Config, SecondaryHostType), mongoose_helper:ensure_muc_clean(). ensure_muc_stopped() -> diff --git a/big_tests/tests/muc_helper.erl b/big_tests/tests/muc_helper.erl index fed89e7484a..6e64767fd3d 100644 --- a/big_tests/tests/muc_helper.erl +++ b/big_tests/tests/muc_helper.erl @@ -87,7 +87,7 @@ muc_host_pattern() -> muc_backend() -> mongoose_helper:mnesia_or_rdbms_backend(). 
-muc_online_backend(Config) -> +muc_online_backend(Config) when is_list(Config) -> ct_helper:get_preset_var(Config, muc_backend, mnesia). start_room(Config, User, Room, Nick, Opts) -> diff --git a/src/mod_muc.erl b/src/mod_muc.erl index 85ded20dcf1..c9a2c2742ed 100644 --- a/src/mod_muc.erl +++ b/src/mod_muc.erl @@ -921,8 +921,12 @@ register_room(HostType, MucHost, Room, Pid) -> -spec room_jid_to_pid(RoomJID :: jid:jid()) -> {ok, pid()} | {error, not_found}. room_jid_to_pid(#jid{luser = Room, lserver = MucHost}) -> - {ok, HostType} = mongoose_domain_api:get_subdomain_host_type(MucHost), - find_room_pid(HostType, MucHost, Room). + case mongoose_domain_api:get_subdomain_host_type(MucHost) of + {ok, HostType} -> + find_room_pid(HostType, MucHost, Room); + _ -> + {error, not_found} + end. find_room_pid(HostType, MucHost, Room) -> mongoose_muc_online_backend:find_room_pid(HostType, MucHost, Room). From 36161ef7580412d518cac40df221700db827d05a Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 21 Jul 2023 16:40:37 +0200 Subject: [PATCH 127/161] Improve output spec of the start_room function --- src/mod_muc.erl | 20 +++++++------------- src/mod_muc_room.erl | 12 ++++-------- src/muc/mongoose_muc_online_mnesia.erl | 6 ++++++ 3 files changed, 17 insertions(+), 21 deletions(-) diff --git a/src/mod_muc.erl b/src/mod_muc.erl index c9a2c2742ed..a0e4368f079 100644 --- a/src/mod_muc.erl +++ b/src/mod_muc.erl @@ -683,7 +683,7 @@ get_registered_room_or_route_error_from_presence(MucHost, Room, From, To, Acc, default_room_opts = DefRoomOpts} = State, {_, _, Nick} = jid:to_lower(To), ServerHost = make_server_host(To, State), - Result = start_new_room(HostType, ServerHost, MucHost, Access, Room, + Result = start_room(HostType, ServerHost, MucHost, Access, Room, HistorySize, RoomShaper, HttpAuthPool, From, Nick, DefRoomOpts, Acc), case Result of @@ -701,11 +701,7 @@ get_registered_room_or_route_error_from_presence(MucHost, Room, From, To, Acc, {Acc1, Err} = jlib:make_error_reply( 
Acc, Packet, mongoose_xmpp_errors:service_unavailable(Lang, ErrText)), ejabberd_router:route(To, From, Acc1, Err), - {route_error, ErrText}; - _ -> - %% Unknown error, most likely a room process failed to start. - %% Do not notify user (we can send "internal server error"). - erlang:error({start_new_room_failed, Room, Result}) + {route_error, ErrText} end; {error, Reason} -> Lang = exml_query:attr(Packet, <<"xml:lang">>, <<>>), @@ -872,15 +868,13 @@ check_user_can_create_room(HostType, ServerHost, AccessCreate, From, RoomID) -> {error, no_matching_acl_rule} end. --spec start_new_room(HostType :: host_type(), ServerHost :: jid:lserver(), +-spec start_room(HostType :: host_type(), ServerHost :: jid:lserver(), MucHost :: muc_host(), Access :: access(), room(), - HistorySize :: 'undefined' | integer(), RoomShaper :: shaper:shaper(), + HistorySize :: undefined | integer(), RoomShaper :: shaper:shaper(), HttpAuthPool :: none | mongoose_http_client:pool(), From :: jid:jid(), nick(), - DefRoomOpts :: 'undefined' | [any()], Acc :: mongoose_acc:t()) - -> {'error', _} - | {'ok', 'undefined' | pid()} - | {'ok', 'undefined' | pid(), _}. -start_new_room(HostType, ServerHost, MucHost, Access, Room, + DefRoomOpts :: undefined | [any()], Acc :: mongoose_acc:t()) + -> {error, {failed_to_restore, Reason :: term()}} | {ok, pid()}. 
+start_room(HostType, ServerHost, MucHost, Access, Room, HistorySize, RoomShaper, HttpAuthPool, From, Nick, DefRoomOpts, Acc) -> case mod_muc_backend:restore_room(HostType, MucHost, Room) of diff --git a/src/mod_muc_room.erl b/src/mod_muc_room.erl index ff74e21ae36..8d896f71424 100644 --- a/src/mod_muc_room.erl +++ b/src/mod_muc_room.erl @@ -154,13 +154,11 @@ %%% API %%%---------------------------------------------------------------------- --spec start_new(HostType :: mongooseim:host_type(), Host :: jid:server(), ServerHost :: jid:server(), +-spec start_new(HostType :: mongooseim:host_type(), Host :: jid:lserver(), ServerHost :: jid:lserver(), Access :: _, Room :: mod_muc:room(), HistorySize :: integer(), RoomShaper :: shaper:shaper(), HttpAuthPool :: none | mongoose_http_client:pool(), Creator :: jid:jid(), Nick :: mod_muc:nick(), - DefRoomOpts :: list()) -> {'error', _} - | {'ok', 'undefined' | pid()} - | {'ok', 'undefined' | pid(), _}. + DefRoomOpts :: list()) -> {ok, pid()}. start_new(HostType, Host, ServerHost, Access, Room, HistorySize, RoomShaper, HttpAuthPool, Creator, Nick, DefRoomOpts) -> Supervisor = gen_mod:get_module_proc(HostType, ejabberd_mod_muc_sup), @@ -171,12 +169,10 @@ start_new(HostType, Host, ServerHost, Access, Room, creator => Creator, nick => Nick, def_opts => DefRoomOpts}, supervisor:start_child(Supervisor, [Args]). --spec start_restored(HostType :: mongooseim:host_type(), Host :: jid:server(), ServerHost :: jid:server(), +-spec start_restored(HostType :: mongooseim:host_type(), Host :: jid:lserver(), ServerHost :: jid:lserver(), Access :: _, Room :: mod_muc:room(), HistorySize :: integer(), RoomShaper :: shaper:shaper(), HttpAuthPool :: none | mongoose_http_client:pool(), - Opts :: list()) -> {'error', _} - | {'ok', 'undefined' | pid()} - | {'ok', 'undefined' | pid(), _}. + Opts :: list()) -> {ok, pid()}. 
start_restored(HostType, Host, ServerHost, Access, Room, HistorySize, RoomShaper, HttpAuthPool, Opts) when is_list(Opts) -> diff --git a/src/muc/mongoose_muc_online_mnesia.erl b/src/muc/mongoose_muc_online_mnesia.erl index 53d9b6431d7..25b8b305163 100644 --- a/src/muc/mongoose_muc_online_mnesia.erl +++ b/src/muc/mongoose_muc_online_mnesia.erl @@ -8,6 +8,7 @@ -include_lib("mod_muc.hrl"). +-spec start(mongooseim:host_type(), gen_mod:module_opts()) -> ok. start(_HostType, _Opts) -> mnesia:create_table(muc_online_room, [{ram_copies, [node()]}, @@ -15,6 +16,11 @@ start(_HostType, _Opts) -> mnesia:add_table_copy(muc_online_room, node(), ram_copies), ok. +-spec register_room( + HostType :: mongooseim:host_type(), + MucHost :: jid:lserver(), + Room :: mod_muc:room(), + Pid :: pid()) -> ok | {exists, pid()} | {error, term()}. register_room(HostType, MucHost, Room, Pid) -> F = fun() -> case mnesia:read(muc_online_room, {Room, MucHost}, write) of From b645f81472de8f622c2623ba2ca4b961324a12c4 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 31 Jul 2023 14:15:07 +0200 Subject: [PATCH 128/161] Add CETS backend for mongoose_muc_online --- big_tests/test.config | 2 +- big_tests/tests/muc_helper.erl | 2 +- src/muc/mongoose_muc_online_cets.erl | 88 ++++++++++++++++++++++++++++ 3 files changed, 90 insertions(+), 2 deletions(-) create mode 100644 src/muc/mongoose_muc_online_cets.erl diff --git a/big_tests/test.config b/big_tests/test.config index 49e0664ec3a..c52100ba542 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -244,7 +244,7 @@ {component_backend, "\"cets\""}, {s2s_backend, "\"cets\""}, {stream_management_backend, cets}, - {muc_backend, cets}, + {muc_online_backend, cets}, {auth_method, "rdbms"}, {internal_databases, "[internal_databases.cets] cluster_name = \"{{cluster_name}}\" diff --git a/big_tests/tests/muc_helper.erl b/big_tests/tests/muc_helper.erl index 6e64767fd3d..bdd229936a0 100644 --- a/big_tests/tests/muc_helper.erl +++ 
b/big_tests/tests/muc_helper.erl @@ -88,7 +88,7 @@ muc_backend() -> mongoose_helper:mnesia_or_rdbms_backend(). muc_online_backend(Config) when is_list(Config) -> - ct_helper:get_preset_var(Config, muc_backend, mnesia). + ct_helper:get_preset_var(Config, muc_online_backend, mnesia). start_room(Config, User, Room, Nick, Opts) -> From = generate_rpc_jid(User), diff --git a/src/muc/mongoose_muc_online_cets.erl b/src/muc/mongoose_muc_online_cets.erl new file mode 100644 index 00000000000..6c97152aa10 --- /dev/null +++ b/src/muc/mongoose_muc_online_cets.erl @@ -0,0 +1,88 @@ +-module(mongoose_muc_online_cets). +-export([start/2, + register_room/4, + room_destroyed/4, + find_room_pid/3, + get_online_rooms/2, + node_cleanup/2]). + +-export([handle_conflict/2]). + +-include_lib("mod_muc.hrl"). + +%% Use MucHost first for prefix select optimization in get_online_rooms +-type muc_tuple() :: {{MucHost :: jid:lserver(), Room :: mod_muc:room()}, Pid :: pid()}. + +table_name(HostType) -> + binary_to_atom(<<"cets_muc_online_room_", HostType/binary>>). + +-spec start(mongooseim:host_type(), gen_mod:module_opts()) -> ok. +start(HostType, _Opts) -> + Tab = table_name(HostType), + %% Non-random, non-node-specific keys + %% This means that default merging would not work + cets:start(Tab, #{handle_conflict => fun ?MODULE:handle_conflict/2}), + cets_discovery:add_table(mongoose_cets_discovery, Tab), + ok. + +%% We should keep one room and stop another room +%% But stopping logic needs to be tested heavily and designed +%% because we would need to figure out how to send presences to participants +%% (and maybe document how to rejoin the kicked room) +-spec handle_conflict(muc_tuple(), muc_tuple()) -> muc_tuple(). +handle_conflict(Rec1, Rec2) when Rec1 > Rec2 -> + Rec1; +handle_conflict(_Rec1, Rec2) -> + Rec2. + +-spec register_room( + HostType :: mongooseim:host_type(), + MucHost :: jid:lserver(), + Room :: mod_muc:room(), + Pid :: pid()) -> ok | {exists, pid()} | {error, term()}. 
+register_room(HostType, MucHost, Room, Pid) -> + Tab = table_name(HostType), + Rec = {{MucHost, Room}, Pid}, + case find_room_pid(HostType, MucHost, Room) of + %% Race condition is possible + %% TODO use cets:insert_new/2 once available + %% Otherwise cets:insert could overwrite an existing registration + {ok, OtherPid} -> + {exists, OtherPid}; + {error, not_found} -> + cets:insert(Tab, Rec), + ok + end. + +%% Race condition is possible between register and room_destroyed +%% (Because register is outside of the room process) +-spec room_destroyed(mongooseim:host_type(), jid:server(), mod_muc:room(), pid()) -> ok. +room_destroyed(HostType, MucHost, Room, Pid) -> + Tab = table_name(HostType), + Rec = {{MucHost, Room}, Pid}, + cets:delete_object(Tab, Rec), + ok. + +find_room_pid(HostType, MucHost, Room) -> + Tab = table_name(HostType), + case ets:lookup(Tab, {MucHost, Room}) of + [{_, Pid}] -> + {ok, Pid}; + [] -> + {error, not_found} + end. + +%% This is used by MUC discovery but it is not very scalable. +%% This function should look like get_online_rooms(HostType, MucHost, AfterRoomName, Limit) +%% to reduce the load and still have pagination working. +get_online_rooms(HostType, MucHost) -> + Tab = table_name(HostType), + [#muc_online_room{name_host = {Room, MucHost}, pid = Pid} + || [Room, Pid] <- ets:match(Tab, {{MucHost, '$1'}, '$2'})]. + +node_cleanup(HostType, Node) -> + Tab = table_name(HostType), + Pattern = {'_', '$1'}, + Guard = {'==', {node, '$1'}, Node}, + ets:select_delete(Tab, [{Pattern, [Guard], [true]}]), + ok. 
From 7ca84d8c40391a5c9f4d71fb017c29ef20b960b7 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 31 Jul 2023 14:45:48 +0200 Subject: [PATCH 129/161] Fix dialyzer --- src/mod_muc.erl | 1 + src/muc/mongoose_muc_online_backend.erl | 30 ++++++++++++++++++++++++- src/muc/mongoose_muc_online_cets.erl | 13 +++++++++-- src/muc/mongoose_muc_online_mnesia.erl | 9 +++++++- 4 files changed, 49 insertions(+), 4 deletions(-) diff --git a/src/mod_muc.erl b/src/mod_muc.erl index a0e4368f079..cd56976d1c8 100644 --- a/src/mod_muc.erl +++ b/src/mod_muc.erl @@ -119,6 +119,7 @@ host_type :: host_type(), pid :: pid() }. +-export_type([muc_online_room/0]). -type room_event_data() :: #{ from_nick := nick(), diff --git a/src/muc/mongoose_muc_online_backend.erl b/src/muc/mongoose_muc_online_backend.erl index a4d6c3c1c70..3b0222478ca 100644 --- a/src/muc/mongoose_muc_online_backend.erl +++ b/src/muc/mongoose_muc_online_backend.erl @@ -11,6 +11,24 @@ %% Callbacks +-callback start(mongooseim:host_type(), gen_mod:module_opts()) -> ok. + +-callback register_room( + HostType :: mongooseim:host_type(), + MucHost :: jid:lserver(), + Room :: mod_muc:room(), + Pid :: pid()) -> ok | {exists, pid()} | {error, term()}. + +-callback room_destroyed(mongooseim:host_type(), jid:lserver(), mod_muc:room(), pid()) -> ok. + +-callback find_room_pid(mongooseim:host_type(), jid:server(), mod_muc:room()) -> + {ok, pid()} | {error, not_found}. + +-callback get_online_rooms(mongooseim:host_type(), jid:lserver()) -> + [mod_muc:muc_online_room()]. + +-callback node_cleanup(mongooseim:host_type(), node()) -> ok. + %% API Functions -spec start(mongooseim:host_type(), gen_mod:module_opts()) -> any(). @@ -24,23 +42,33 @@ tracked_funs() -> room_destroyed, get_online_rooms]. +-spec register_room( + HostType :: mongooseim:host_type(), + MucHost :: jid:lserver(), + Room :: mod_muc:room(), + Pid :: pid()) -> ok | {exists, pid()} | {error, term()}. 
register_room(HostType, MucHost, Room, Pid) -> Args = [HostType, MucHost, Room, Pid], mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). --spec room_destroyed(mongooseim:host_type(), jid:server(), mod_muc:room(), pid()) -> ok. +-spec room_destroyed(mongooseim:host_type(), jid:lserver(), mod_muc:room(), pid()) -> ok. room_destroyed(HostType, MucHost, Room, Pid) -> Args = [HostType, MucHost, Room, Pid], mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec find_room_pid(mongooseim:host_type(), jid:server(), mod_muc:room()) -> + {ok, pid()} | {error, not_found}. find_room_pid(HostType, MucHost, Room) -> Args = [HostType, MucHost, Room], mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec get_online_rooms(mongooseim:host_type(), jid:lserver()) -> + [mod_muc:muc_online_room()]. get_online_rooms(HostType, MucHost) -> Args = [HostType, MucHost], mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec node_cleanup(mongooseim:host_type(), node()) -> ok. node_cleanup(HostType, Node) -> Args = [HostType, Node], mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). diff --git a/src/muc/mongoose_muc_online_cets.erl b/src/muc/mongoose_muc_online_cets.erl index 6c97152aa10..aef9f830f39 100644 --- a/src/muc/mongoose_muc_online_cets.erl +++ b/src/muc/mongoose_muc_online_cets.erl @@ -1,4 +1,6 @@ -module(mongoose_muc_online_cets). +-behaviour(mongoose_muc_online_backend). + -export([start/2, register_room/4, room_destroyed/4, @@ -11,6 +13,8 @@ -include_lib("mod_muc.hrl"). %% Use MucHost first for prefix select optimization in get_online_rooms +%% We also don't want to send HostType in muc_online_room.host_type between CETS nodes +%% or store it -type muc_tuple() :: {{MucHost :: jid:lserver(), Room :: mod_muc:room()}, Pid :: pid()}. 
table_name(HostType) -> @@ -56,13 +60,15 @@ register_room(HostType, MucHost, Room, Pid) -> %% Race condition is possible between register and room_destroyed %% (Because register is outside of the room process) --spec room_destroyed(mongooseim:host_type(), jid:server(), mod_muc:room(), pid()) -> ok. +-spec room_destroyed(mongooseim:host_type(), jid:lserver(), mod_muc:room(), pid()) -> ok. room_destroyed(HostType, MucHost, Room, Pid) -> Tab = table_name(HostType), Rec = {{MucHost, Room}, Pid}, cets:delete_object(Tab, Rec), ok. +-spec find_room_pid(mongooseim:host_type(), jid:server(), mod_muc:room()) -> + {ok, pid()} | {error, not_found}. find_room_pid(HostType, MucHost, Room) -> Tab = table_name(HostType), case ets:lookup(Tab, {MucHost, Room}) of @@ -75,11 +81,14 @@ find_room_pid(HostType, MucHost, Room) -> %% This is used by MUC discovery but it is not very scalable. %% This function should look like get_online_rooms(HostType, MucHost, AfterRoomName, Limit) %% to reduce the load and still have pagination working. +-spec get_online_rooms(mongooseim:host_type(), jid:lserver()) -> + [mod_muc:muc_online_room()]. get_online_rooms(HostType, MucHost) -> Tab = table_name(HostType), - [#muc_online_room{name_host = {Room, MucHost}, pid = Pid} + [#muc_online_room{name_host = {Room, MucHost}, pid = Pid, host_type = HostType} || [Room, Pid] <- ets:match(Tab, {{MucHost, '$1'}, '$2'})]. +-spec node_cleanup(mongooseim:host_type(), node()) -> ok. node_cleanup(HostType, Node) -> Tab = table_name(HostType), Pattern = {'_', '$1'}, diff --git a/src/muc/mongoose_muc_online_mnesia.erl b/src/muc/mongoose_muc_online_mnesia.erl index 25b8b305163..435ed82b7f1 100644 --- a/src/muc/mongoose_muc_online_mnesia.erl +++ b/src/muc/mongoose_muc_online_mnesia.erl @@ -1,4 +1,6 @@ -module(mongoose_muc_online_mnesia). +-behaviour(mongoose_muc_online_backend). 
+ -export([start/2, register_room/4, room_destroyed/4, @@ -36,7 +38,7 @@ register_room(HostType, MucHost, Room, Pid) -> %% Race condition is possible between register and room_destroyed %% (Because register is outside of the room process) --spec room_destroyed(mongooseim:host_type(), jid:server(), mod_muc:room(), pid()) -> ok. +-spec room_destroyed(mongooseim:host_type(), jid:lserver(), mod_muc:room(), pid()) -> ok. room_destroyed(HostType, MucHost, Room, Pid) -> Obj = #muc_online_room{name_host = {Room, MucHost}, host_type = HostType, pid = Pid}, @@ -49,6 +51,8 @@ simple_transaction_result({atomic, Res}) -> simple_transaction_result({aborted, Reason}) -> {error, Reason}. +-spec find_room_pid(mongooseim:host_type(), jid:server(), mod_muc:room()) -> + {ok, pid()} | {error, not_found}. find_room_pid(_HostType, MucHost, Room) -> case mnesia:dirty_read(muc_online_room, {Room, MucHost}) of [R] -> @@ -57,12 +61,15 @@ find_room_pid(_HostType, MucHost, Room) -> {error, not_found} end. +-spec get_online_rooms(mongooseim:host_type(), jid:lserver()) -> + [mod_muc:muc_online_room()]. get_online_rooms(_HostType, MucHost) -> mnesia:dirty_select(muc_online_room, [{#muc_online_room{name_host = '$1', _ = '_'}, [{'==', {element, 2, '$1'}, MucHost}], ['$_']}]). +-spec node_cleanup(mongooseim:host_type(), node()) -> ok. 
node_cleanup(HostType, Node) -> F = fun() -> Es = mnesia:select( From 5b11bf62cb3e6d9e341b403a826603058b0b80d6 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 31 Jul 2023 14:52:12 +0200 Subject: [PATCH 130/161] Fix unload_muc in tests --- big_tests/tests/gdpr_SUITE.erl | 2 +- big_tests/tests/graphql_muc_SUITE.erl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/big_tests/tests/gdpr_SUITE.erl b/big_tests/tests/gdpr_SUITE.erl index 699f1e23fee..e19c4a0dff3 100644 --- a/big_tests/tests/gdpr_SUITE.erl +++ b/big_tests/tests/gdpr_SUITE.erl @@ -316,7 +316,7 @@ end_per_testcase(CN, Config) when escalus:end_per_testcase(CN, Config); end_per_testcase(CN, Config) when CN =:= retrieve_inbox_muc; CN =:= remove_inbox_muc -> - muc_helper:unload_muc(Config), + muc_helper:unload_muc(), escalus:end_per_testcase(CN, Config); end_per_testcase(CN, Config) -> escalus_fresh:clean(), diff --git a/big_tests/tests/graphql_muc_SUITE.erl b/big_tests/tests/graphql_muc_SUITE.erl index 08a9782a563..b57d31dc99b 100644 --- a/big_tests/tests/graphql_muc_SUITE.erl +++ b/big_tests/tests/graphql_muc_SUITE.erl @@ -227,7 +227,7 @@ init_per_suite(Config) -> end_per_suite(Config) -> escalus_fresh:clean(), mongoose_helper:ensure_muc_clean(), - muc_helper:unload_muc(Config), + muc_helper:unload_muc(), dynamic_modules:restore_modules(Config), escalus:end_per_suite(Config). 
From 4f6a4f87b3ec8b3f062a8a728041155f868fdba9 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 31 Jul 2023 17:59:11 +0200 Subject: [PATCH 131/161] Use cets:insert_new/2 And retry --- rebar.lock | 2 +- src/muc/mongoose_muc_online_cets.erl | 16 +++++++++++----- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/rebar.lock b/rebar.lock index c97d3ad73f2..445c07bc131 100644 --- a/rebar.lock +++ b/rebar.lock @@ -8,7 +8,7 @@ {<<"certifi">>,{pkg,<<"certifi">>,<<"2.9.0">>},1}, {<<"cets">>, {git,"https://github.com/esl/cets.git", - {ref,"458e2e1df3fb51896fe334385bb0d2c9c53ef87f"}}, + {ref,"7d4876fe5285118f5349970ffb20080ea62293db"}}, 0}, {<<"cowboy">>,{pkg,<<"cowboy">>,<<"2.9.0">>},0}, {<<"cowboy_swagger">>,{pkg,<<"cowboy_swagger">>,<<"2.5.1">>},0}, diff --git a/src/muc/mongoose_muc_online_cets.erl b/src/muc/mongoose_muc_online_cets.erl index aef9f830f39..d9cf4e7a305 100644 --- a/src/muc/mongoose_muc_online_cets.erl +++ b/src/muc/mongoose_muc_online_cets.erl @@ -45,17 +45,23 @@ handle_conflict(_Rec1, Rec2) -> Room :: mod_muc:room(), Pid :: pid()) -> ok | {exists, pid()} | {error, term()}. register_room(HostType, MucHost, Room, Pid) -> + register_room(HostType, MucHost, Room, Pid, 3). + +register_room(_HostType, _MucHost, _Room, _Pid, 0) -> + {error, failed_to_register}; +register_room(HostType, MucHost, Room, Pid, Retries) -> Tab = table_name(HostType), Rec = {{MucHost, Room}, Pid}, case find_room_pid(HostType, MucHost, Room) of - %% Race condition is possible - %% TODO use cets:insert_new/2 once available - %% Otherwise cets:insert could overwrite an existing registration {ok, OtherPid} -> {exists, OtherPid}; {error, not_found} -> - cets:insert(Tab, Rec), - ok + case cets:insert_new(Tab, Rec) of + true -> + ok; + false -> + register_room(HostType, MucHost, Room, Pid, Retries - 1) + end end. 
%% Race condition is possible between register and room_destroyed From 579b730c41987118daf91f80be995c4ffcfd4888 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 31 Jul 2023 21:11:55 +0200 Subject: [PATCH 132/161] Implement clear_table function for tests --- big_tests/tests/mongoose_helper.erl | 3 ++- src/muc/mongoose_muc_online_backend.erl | 8 +++++++- src/muc/mongoose_muc_online_cets.erl | 12 +++++++++++- src/muc/mongoose_muc_online_mnesia.erl | 9 ++++++++- 4 files changed, 28 insertions(+), 4 deletions(-) diff --git a/big_tests/tests/mongoose_helper.erl b/big_tests/tests/mongoose_helper.erl index 502805ef364..bf87f05ed32 100644 --- a/big_tests/tests/mongoose_helper.erl +++ b/big_tests/tests/mongoose_helper.erl @@ -242,7 +242,8 @@ stop_online_rooms() -> false -> ct:fail({ejabberd_mod_muc_sup_not_found, Supervisor, HostType}) end, rpc(mim(), erlang, exit, [SupervisorPid, kill]), - rpc(mim(), mnesia, clear_table, [muc_online_room]), + %% That's a pretty dirty way + rpc(mim(), mongoose_muc_online_backend, clear_table, [HostType]), ok. forget_persistent_rooms() -> diff --git a/src/muc/mongoose_muc_online_backend.erl b/src/muc/mongoose_muc_online_backend.erl index 3b0222478ca..0380187907f 100644 --- a/src/muc/mongoose_muc_online_backend.erl +++ b/src/muc/mongoose_muc_online_backend.erl @@ -5,7 +5,8 @@ room_destroyed/4, find_room_pid/3, get_online_rooms/2, - node_cleanup/2]). + node_cleanup/2, + clear_table/1]). -define(MAIN_MODULE, mongoose_muc_online). @@ -29,6 +30,8 @@ -callback node_cleanup(mongooseim:host_type(), node()) -> ok. +-callback clear_table(mongooseim:host_type()) -> ok. + %% API Functions -spec start(mongooseim:host_type(), gen_mod:module_opts()) -> any(). @@ -72,3 +75,6 @@ get_online_rooms(HostType, MucHost) -> node_cleanup(HostType, Node) -> Args = [HostType, Node], mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +clear_table(HostType) -> + mongoose_backend:call(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, [HostType]). 
diff --git a/src/muc/mongoose_muc_online_cets.erl b/src/muc/mongoose_muc_online_cets.erl index d9cf4e7a305..f0c026f81eb 100644 --- a/src/muc/mongoose_muc_online_cets.erl +++ b/src/muc/mongoose_muc_online_cets.erl @@ -6,7 +6,8 @@ room_destroyed/4, find_room_pid/3, get_online_rooms/2, - node_cleanup/2]). + node_cleanup/2, + clear_table/1]). -export([handle_conflict/2]). @@ -101,3 +102,12 @@ node_cleanup(HostType, Node) -> Guard = {'==', {node, '$1'}, Node}, ets:select_delete(Tab, [{Pattern, [Guard], [true]}]), ok. + +%% Clear table for tests +-spec clear_table(mongooseim:host_type()) -> ok. +clear_table(HostType) -> + Tab = table_name(HostType), + ets:match_delete(Tab, '_'), + Nodes = cets:other_nodes(Tab), + [rpc:call(Node, ets, match_delete, [Tab, '_']) || Node <- Nodes], + ok. diff --git a/src/muc/mongoose_muc_online_mnesia.erl b/src/muc/mongoose_muc_online_mnesia.erl index 435ed82b7f1..6699b775744 100644 --- a/src/muc/mongoose_muc_online_mnesia.erl +++ b/src/muc/mongoose_muc_online_mnesia.erl @@ -6,7 +6,8 @@ room_destroyed/4, find_room_pid/3, get_online_rooms/2, - node_cleanup/2]). + node_cleanup/2, + clear_table/1]). -include_lib("mod_muc.hrl"). @@ -85,3 +86,9 @@ node_cleanup(HostType, Node) -> end, mnesia:async_dirty(F), ok. + +%% Clear table for tests +-spec clear_table(mongooseim:host_type()) -> ok. +clear_table(_HostType) -> + mnesia:clear_table(muc_online_room), + ok. 
From 8a49ce3681cb3c64cfcfc125ebc2b3eb1bc4fc8f Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 1 Aug 2023 08:05:46 +0200 Subject: [PATCH 133/161] Use mongoose_muc_online_backend in muc_helper:destroy_room --- big_tests/tests/muc_helper.erl | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/big_tests/tests/muc_helper.erl b/big_tests/tests/muc_helper.erl index bdd229936a0..39ba0950ee5 100644 --- a/big_tests/tests/muc_helper.erl +++ b/big_tests/tests/muc_helper.erl @@ -180,15 +180,26 @@ destroy_room(Config) -> destroy_room(muc_host(), ?config(room, Config)). destroy_room(Host, Room) when is_binary(Host), is_binary(Room) -> + HostType = domain_helper:host_type(), Room1 = jid:nodeprep(Room), - case rpc(mim(), ets, lookup, [muc_online_room, {Room1, Host}]) of - [{_,_,Pid}|_] -> + case rpc(mim(), mongoose_muc_online_backend, find_room_pid, [HostType, Host, Room1]) of + {ok, Pid} -> %% @TODO related to gen_fsm_compat: after migration to gen_statem %% should be replaced to - gen_statem:call(Pid, destroy). Pid ! {'$gen_all_state_event', destroy}, + wait_for_process_down(Pid), ok; - _ -> + {error, not_found} -> + ok + end. + +wait_for_process_down(Pid) -> + Ref = monitor(process, Pid), + receive + {'DOWN', Ref, _Type, Pid, _Info} -> ok + after 5000 -> + ct:fail(wait_for_process_down_failed) end. stanza_muc_enter_room(Room, Nick) -> From 09a83fe927f9842a60916c7529dd807388843903 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 1 Aug 2023 08:36:37 +0200 Subject: [PATCH 134/161] Add ignore_xref for mongoose_muc_online_backend:clear_table/1 --- src/muc/mongoose_muc_online_backend.erl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/muc/mongoose_muc_online_backend.erl b/src/muc/mongoose_muc_online_backend.erl index 0380187907f..3f94eb144ef 100644 --- a/src/muc/mongoose_muc_online_backend.erl +++ b/src/muc/mongoose_muc_online_backend.erl @@ -8,6 +8,9 @@ node_cleanup/2, clear_table/1]). 
+%% Used in tests +-ignore_xref([clear_table/1]). + -define(MAIN_MODULE, mongoose_muc_online). %% Callbacks From db8b3e025ec998fd04477b644e0654f246607a73 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 1 Aug 2023 15:30:25 +0200 Subject: [PATCH 135/161] Add tests for mongoose_muc_online_backend:node_cleanup/2 --- test/mongoose_cleanup_SUITE.erl | 80 +++++++++++++++++++++++++++++---- 1 file changed, 71 insertions(+), 9 deletions(-) diff --git a/test/mongoose_cleanup_SUITE.erl b/test/mongoose_cleanup_SUITE.erl index b70c96cac3d..717e9e51a8f 100644 --- a/test/mongoose_cleanup_SUITE.erl +++ b/test/mongoose_cleanup_SUITE.erl @@ -17,7 +17,9 @@ s2s/1, bosh/1, component/1, - component_from_other_node_remains/1 + component_from_other_node_remains/1, + muc_room/1, + muc_room_from_other_node_remains/1 ]). -define(HOST, <<"localhost">>). @@ -41,11 +43,16 @@ all() -> groups() -> [{component_cets, [], component_cases()}, - {component_mnesia, [], component_cases()}]. + {component_mnesia, [], component_cases()}, + {muc_cets, [], muc_cases()}, + {muc_mnesia, [], muc_cases()}]. component_cases() -> [component, component_from_other_node_remains]. +muc_cases() -> + [muc_room, muc_room_from_other_node_remains]. + init_per_suite(Config) -> {ok, _} = application:ensure_all_started(jid), ok = mnesia:create_schema([node()]), @@ -66,20 +73,21 @@ init_per_group(component_mnesia, Config) -> Config; init_per_group(component_cets, Config) -> mongoose_config:set_opt(component_backend, cets), - DiscoOpts = #{name => mongoose_cets_discovery, disco_file => "does_not_exist.txt"}, - {ok, _Pid} = cets_discovery:start(DiscoOpts), - Config. + start_cets_disco(Config); +init_per_group(muc_mnesia, Config) -> + [{muc_backend, mnesia} | Config]; +init_per_group(muc_cets, Config) -> + [{muc_backend, cets} | start_cets_disco(Config)]. -end_per_group(component_cets, _Config) -> - exit(whereis(mongoose_cets_discovery), kill); -end_per_group(_Group, _Config) -> - ok. 
+end_per_group(_Group, Config) -> + stop_cets_disco(Config). init_per_testcase(TestCase, Config) -> mim_ct_sup:start_link(ejabberd_sup), {ok, _HooksServer} = gen_hook:start_link(), setup_meck(meck_mods(TestCase)), start_component(TestCase), + start_muc_backend(Config), Config. end_per_testcase(TestCase, _Config) -> @@ -243,6 +251,26 @@ component_from_other_node_remains(_Config) -> mongoose_component:unregister_components(Comps), ok. +muc_room(_Config) -> + HostType = ?HOST, + MucHost = <<"muc.localhost">>, + Pid = remote_pid(), + Node = node(Pid), + Room = <<"remote_room">>, + ok = mongoose_muc_online_backend:register_room(HostType, MucHost, Room, Pid), + ok = mongoose_muc_online_backend:node_cleanup(HostType, Node), + {error, not_found} = mongoose_muc_online_backend:find_room_pid(HostType, MucHost, Room). + +muc_room_from_other_node_remains(_Config) -> + HostType = ?HOST, + MucHost = <<"muc.localhost">>, + Pid = self(), + RemoteNode = node(remote_pid()), + Room = <<"room_on_other_node">>, + ok = mongoose_muc_online_backend:register_room(HostType, MucHost, Room, Pid), + ok = mongoose_muc_online_backend:node_cleanup(HostType, RemoteNode), + {ok, Pid} = mongoose_muc_online_backend:find_room_pid(HostType, MucHost, Room). + %% ----------------------------------------------------- %% Internal %% ----------------------------------------------------- @@ -304,3 +332,37 @@ start(HostType, Module) -> start(HostType, Module, Opts) -> mongoose_modules:ensure_started(HostType, Module, Opts). + +disco_opts() -> + #{name => mongoose_cets_discovery, disco_file => "does_not_exist.txt"}. + +start_cets_disco(Config) -> + {ok, Pid} = cets_discovery:start(disco_opts()), + [{cets_disco, Pid} | Config]. + +stop_cets_disco(Config) -> + case proplists:get_value(cets_disco, Config) of + Pid when is_pid(Pid) -> + exit(Pid, kill); + _ -> + ok + end. + +%% Pid 90 on cool_node@localhost +%% Made using: +%% erl -name cool_node@localhost +%% rp(term_to_binary(list_to_pid("<0.90.0>"))). 
+remote_pid_binary() -> + <<131, 88, 100, 0, 19, 99, 111, 111, 108, 95, 110, 111, 100, 101, 64, + 108, 111, 99, 97, 108, 104, 111, 115, 116, 0, 0, 0, 90, 0, 0, 0, 0, 100, + 200, 255, 233>>. + +remote_pid() -> + binary_to_term(remote_pid_binary()). + +start_muc_backend(Config) -> + case proplists:get_value(muc_backend, Config) of + undefined -> ok; + Backend -> + mongoose_muc_online_backend:start(?HOST, #{online_backend => Backend}) + end. From 83f07130a3ea12c26deddbf24a3f71022415b608 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 2 Aug 2023 09:31:36 +0200 Subject: [PATCH 136/161] Use ets:match_object/2 instead of ets:select/2 Does the same, just a bit easier to read --- src/ejabberd_sm_cets.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/ejabberd_sm_cets.erl b/src/ejabberd_sm_cets.erl index d9b31d523e7..b602414797e 100644 --- a/src/ejabberd_sm_cets.erl +++ b/src/ejabberd_sm_cets.erl @@ -35,20 +35,20 @@ get_sessions(Server) -> %% A partially bound key is either a list or a tuple with %% a prefix that is fully bound. R = {{Server, '_', '_', '_'}, '_', '_'}, - Xs = ets:select(?TABLE, [{R, [], ['$_']}]), + Xs = ets:match_object(?TABLE, R), tuples_to_sessions(Xs). -spec get_sessions(jid:luser(), jid:lserver()) -> [ejabberd_sm:session()]. get_sessions(User, Server) -> R = {{Server, User, '_', '_'}, '_', '_'}, - Xs = ets:select(?TABLE, [{R, [], ['$_']}]), + Xs = ets:match_object(?TABLE, R), tuples_to_sessions(Xs). -spec get_sessions(jid:luser(), jid:lserver(), jid:lresource()) -> [ejabberd_sm:session()]. get_sessions(User, Server, Resource) -> R = {{Server, User, Resource, '_'}, '_', '_'}, - Xs = ets:select(?TABLE, [{R, [], ['$_']}]), + Xs = ets:match_object(?TABLE, R), %% TODO these sessions should be deduplicated. %% It is possible, that after merging two cets tables we could end up %% with sessions from two nodes for the same full jid. 
From 7fc4538aa93b5de771af1daf6c21b9405be2112e Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 2 Aug 2023 09:42:13 +0200 Subject: [PATCH 137/161] Use jid:lserver() in specs for mud muc backends --- src/muc/mongoose_muc_online_backend.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/muc/mongoose_muc_online_backend.erl b/src/muc/mongoose_muc_online_backend.erl index 3f94eb144ef..9bbf76f36cf 100644 --- a/src/muc/mongoose_muc_online_backend.erl +++ b/src/muc/mongoose_muc_online_backend.erl @@ -25,7 +25,7 @@ -callback room_destroyed(mongooseim:host_type(), jid:lserver(), mod_muc:room(), pid()) -> ok. --callback find_room_pid(mongooseim:host_type(), jid:server(), mod_muc:room()) -> +-callback find_room_pid(mongooseim:host_type(), jid:lserver(), mod_muc:room()) -> {ok, pid()} | {error, not_found}. -callback get_online_rooms(mongooseim:host_type(), jid:lserver()) -> @@ -62,7 +62,7 @@ room_destroyed(HostType, MucHost, Room, Pid) -> Args = [HostType, MucHost, Room, Pid], mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). --spec find_room_pid(mongooseim:host_type(), jid:server(), mod_muc:room()) -> +-spec find_room_pid(mongooseim:host_type(), jid:lserver(), mod_muc:room()) -> {ok, pid()} | {error, not_found}. 
find_room_pid(HostType, MucHost, Room) -> Args = [HostType, MucHost, Room], From b3bede20758649699d2ea73e08d7a864390b4467 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Wed, 2 Aug 2023 09:46:40 +0200 Subject: [PATCH 138/161] Use mongoose_backend:call instead of tracked in mongoose_muc_online_backend --- src/muc/mongoose_muc_online_backend.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/muc/mongoose_muc_online_backend.erl b/src/muc/mongoose_muc_online_backend.erl index 9bbf76f36cf..ebefdbec989 100644 --- a/src/muc/mongoose_muc_online_backend.erl +++ b/src/muc/mongoose_muc_online_backend.erl @@ -66,7 +66,7 @@ room_destroyed(HostType, MucHost, Room, Pid) -> {ok, pid()} | {error, not_found}. find_room_pid(HostType, MucHost, Room) -> Args = [HostType, MucHost, Room], - mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + mongoose_backend:call(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). -spec get_online_rooms(mongooseim:host_type(), jid:lserver()) -> [mod_muc:muc_online_room()]. @@ -77,7 +77,7 @@ get_online_rooms(HostType, MucHost) -> -spec node_cleanup(mongooseim:host_type(), node()) -> ok. node_cleanup(HostType, Node) -> Args = [HostType, Node], - mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + mongoose_backend:call(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). clear_table(HostType) -> mongoose_backend:call(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, [HostType]). 
From a7dfa255c48db7b8ff2d5390a510ebd054000ee3 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 3 Aug 2023 13:40:12 +0200 Subject: [PATCH 139/161] Call mod_jingle_sip_backend:remove_session --- src/jingle_sip/jingle_sip_callbacks.erl | 5 +++-- src/jingle_sip/mod_jingle_sip_backend.erl | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/jingle_sip/jingle_sip_callbacks.erl b/src/jingle_sip/jingle_sip_callbacks.erl index b4e06a5ecd2..09b742f721c 100644 --- a/src/jingle_sip/jingle_sip_callbacks.erl +++ b/src/jingle_sip/jingle_sip_callbacks.erl @@ -176,7 +176,7 @@ sip_bye(Req, _Call) -> from_jid => FromJID, to_jid => ToJID }), maybe_route_to_all_sessions(FromJID, ToJID, Acc, IQEl), - + ok = mod_jingle_sip_backend:remove_session(CallID), {reply, ok}. sip_cancel(_InviteReq, Req, _Call) -> @@ -194,7 +194,7 @@ sip_cancel(_InviteReq, Req, _Call) -> from_jid => FromJID, to_jid => ToJID }), maybe_route_to_all_sessions(FromJID, ToJID, Acc, IQEl), - + ok = mod_jingle_sip_backend:remove_session(CallID), {reply, ok}. sip_dialog_update(start, Dialog, Call) -> @@ -286,6 +286,7 @@ invite_resp_callback({resp, ErrorCode, SIPMsg, _Call}) from_jid => FromJID, to_jid => ToJID }), maybe_route_to_all_sessions(FromJID, ToJID, Acc, IQEl), + ok = mod_jingle_sip_backend:remove_session(CallID), ok; invite_resp_callback(Data) -> ?LOG_ERROR(#{what => sip_unknown_response, sip_data => Data}). diff --git a/src/jingle_sip/mod_jingle_sip_backend.erl b/src/jingle_sip/mod_jingle_sip_backend.erl index 06b9914670f..43c360e8483 100644 --- a/src/jingle_sip/mod_jingle_sip_backend.erl +++ b/src/jingle_sip/mod_jingle_sip_backend.erl @@ -38,7 +38,7 @@ -ignore_xref([remove_session/1]). 
--record(jingle_sip_session, {sid, +-record(jingle_sip_session, {sid, %% CallID dialog, state, direction, From 9849b1057d62bf6347a37be1c883cd67a04ff282 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 3 Aug 2023 17:08:38 +0200 Subject: [PATCH 140/161] Move mod_jingle_sip_backend into mod_jingle_sip_mnesia --- src/jingle_sip/mod_jingle_sip.erl | 7 +- src/jingle_sip/mod_jingle_sip_backend.erl | 192 +++-------------- src/jingle_sip/mod_jingle_sip_mnesia.erl | 239 ++++++++++++++++++++++ 3 files changed, 270 insertions(+), 168 deletions(-) create mode 100644 src/jingle_sip/mod_jingle_sip_mnesia.erl diff --git a/src/jingle_sip/mod_jingle_sip.erl b/src/jingle_sip/mod_jingle_sip.erl index 2fb46a48832..ef3d20378b0 100644 --- a/src/jingle_sip/mod_jingle_sip.erl +++ b/src/jingle_sip/mod_jingle_sip.erl @@ -96,7 +96,9 @@ config_spec() -> validate = ip_address}, <<"transport">> => #option{type = string, validate = {enum, ["udp", "tcp"]}}, - <<"username_to_phone">> => #list{items = username_to_phone_spec()} + <<"username_to_phone">> => #list{items = username_to_phone_spec()}, + <<"backend">> => #option{type = atom, + validate = {module, mod_jingle_sip}} }, defaults = #{<<"proxy_host">> => "localhost", <<"proxy_port">> => 5060, @@ -104,7 +106,8 @@ config_spec() -> <<"local_host">> => "localhost", <<"sdp_origin">> => "127.0.0.1", <<"transport">> => "udp", - <<"username_to_phone">> => []} + <<"username_to_phone">> => [], + <<"backend">> => mnesia} }. username_to_phone_spec() -> diff --git a/src/jingle_sip/mod_jingle_sip_backend.erl b/src/jingle_sip/mod_jingle_sip_backend.erl index 43c360e8483..1199aa550a7 100644 --- a/src/jingle_sip/mod_jingle_sip_backend.erl +++ b/src/jingle_sip/mod_jingle_sip_backend.erl @@ -38,201 +38,61 @@ -ignore_xref([remove_session/1]). --record(jingle_sip_session, {sid, %% CallID - dialog, - state, - direction, - request, - node, - owner, - from, - to, - now, - meta}). 
- -init(_Host, _Opts) -> - mnesia:create_table(jingle_sip_session, - [{ram_copies, [node()]}, - {attributes, record_info(fields, jingle_sip_session)}]). +-define(MAIN_MODULE, mod_jingle_sip). + +init(Host, Opts) -> + Args = [Host, Opts], + mongoose_backend:init(global, ?MAIN_MODULE, [], Opts), + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). -spec set_incoming_request(CallID :: call_id(), ReqID :: binary(), From :: jid:jid(), To :: jid:jid(), exml:element()) -> ok | {error, any()}. set_incoming_request(CallID, ReqID, From, To, JingleEl) -> - TFun = pa:bind(fun set_incoming_request_tr/5, CallID, ReqID, From, To, JingleEl), - run_transaction(TFun). - -set_incoming_request_tr(CallID, ReqID, From, To, JingleEl) -> - Owner = jid:to_lus(To), - case mnesia:wread({jingle_sip_session, CallID}) of - [_] -> - {error, sid_already_exists}; - _ -> - Meta = #{init_stanza => JingleEl}, - Session = #jingle_sip_session{sid = CallID, - request = ReqID, - dialog = undefined, - state = undefined, - direction = in, - node = node(), - from = jid:to_lus(From), - to = Owner, - owner = Owner, - now = os:system_time(microsecond), - meta = Meta}, - mnesia:write(Session) - end. + Args = [CallID, ReqID, From, To, JingleEl], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). -spec set_outgoing_request(CallID :: call_id(), ReqID :: binary(), From :: jid:jid(), To :: jid:jid()) -> ok | {error, any()}. set_outgoing_request(CallID, ReqID, From, To) -> - TFun = pa:bind(fun set_outgoing_request_tr/4, CallID, ReqID, From, To), - run_transaction(TFun). 
- -set_outgoing_request_tr(CallID, ReqID, From, To) -> - Owner = jid:to_lus(From), - case mnesia:wread({jingle_sip_session, CallID}) of - [_] -> - {error, sid_already_exists}; - _ -> - Session = #jingle_sip_session{sid = CallID, - request = ReqID, - dialog = undefined, - state = undefined, - direction = out, - node = node(), - from = Owner, - to = jid:to_lus(To), - owner = Owner, - now = os:system_time(microsecond), - meta = #{}}, - mnesia:write(Session) - end. - -run_transaction(TFun) -> - case mnesia:transaction(TFun) of - {atomic, Result} -> - Result; - {aborted, Reason} -> - {error, Reason} - end. - + Args = [CallID, ReqID, From, To], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). set_incoming_handle(CallID, DialogHandle) -> - TFun = pa:bind(fun set_incoming_handle_tr/2, CallID, DialogHandle), - run_transaction(TFun). - -set_incoming_handle_tr(CallID, DialogHandle) -> - case mnesia:wread({jingle_sip_session, CallID}) of - [#jingle_sip_session{dialog = undefined, direction = in} = Session] -> - Session2 = Session#jingle_sip_session{dialog = DialogHandle, - node = node()}, - mnesia:write(Session2); - [_] -> - {error, incoming_handle_exists}; - _ -> - {error, not_found} - end. + Args = [CallID, DialogHandle], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). set_outgoing_handle(CallID, DialogHandle, From, To) -> - TFun = pa:bind(fun set_outgoing_handle_tr/4, CallID, DialogHandle, From, To), - run_transaction(TFun). - -set_outgoing_handle_tr(CallID, DialogHandle, _From, _To) -> - case mnesia:wread({jingle_sip_session, CallID}) of - [#jingle_sip_session{dialog = undefined, direction = out} = Session] -> - Session2 = Session#jingle_sip_session{dialog = DialogHandle, - node = node()}, - mnesia:write(Session2); - [_] -> - {error, outgoing_handle_exists}; - _ -> - Session = #jingle_sip_session{sid = CallID, - dialog = DialogHandle, - node = node(), - direction = out, - state = ringing}, - mnesia:write(Session) - end. 
+ Args = [CallID, DialogHandle, From, To], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). set_incoming_accepted(CallID) -> - TFun = pa:bind(fun set_incoming_accepted_tr/1, CallID), - run_transaction(TFun). - -set_incoming_accepted_tr(CallID) -> - case mnesia:wread({jingle_sip_session, CallID}) of - [#jingle_sip_session{direction = in, meta = Meta} = Session] -> - MetaWithoutInitStanza = maps:without([init_stanza], Meta), - Session2 = Session#jingle_sip_session{state = accepted, - meta = MetaWithoutInitStanza}, - mnesia:write(Session2); - _ -> - {error, not_found} - end. + Args = [CallID], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). set_outgoing_accepted(CallID) -> - TFun = pa:bind(fun set_outgoing_accepted_tr/1, CallID), - run_transaction(TFun). - -set_outgoing_accepted_tr(CallID) -> - case mnesia:wread({jingle_sip_session, CallID}) of - [#jingle_sip_session{direction = out} = Session] -> - Session2 = Session#jingle_sip_session{state = accepted}, - mnesia:write(Session2); - _ -> - {error, not_found} - end. + Args = [CallID], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). -spec get_incoming_request(call_id(), jid:jid()) -> {ok, undefined | incoming_request()} | {error, not_found}. get_incoming_request(CallID, User) -> - UserUS = jid:to_lus(User), - case mnesia:dirty_read(jingle_sip_session, CallID) of - [#jingle_sip_session{request = ReqID, node = Node, owner = UserUS}] -> - {ok, {Node, ReqID}}; - _ -> - {error, not_found} - end. + Args = [CallID, User], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). -spec get_outgoing_handle(call_id(), jid:jid()) -> {ok, undefined | outgoing_handle()} | {error, not_found}. get_outgoing_handle(SID, User) -> - UserUS = jid:to_lus(User), - case mnesia:dirty_read(jingle_sip_session, SID) of - [#jingle_sip_session{dialog = Handle, owner = UserUS}] -> - {ok, Handle}; - _ -> - {error, not_found} - end. 
+ Args = [SID, User], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). -spec get_session_info(binary(), jid:jid()) -> {ok, map()} | {error, any()}. get_session_info(SID, User) -> - UserUS = jid:to_lus(User), - case mnesia:dirty_read(jingle_sip_session, SID) of - [#jingle_sip_session{sid = SID, - dialog = Handle, - request = Request, - state = State, - direction = Dir, - node = ONode, - owner = UserUS, - to = To, - from = From, - meta = Meta}] -> - {ok, #{sid => SID, - dialog => Handle, - request => Request, - state => State, - direction => Dir, - node => ONode, - from => From, - to => To, - meta => Meta}}; - _ -> - {error, not_found} - end. + Args = [SID, User], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). remove_session(CallID) -> - mnesia:dirty_delete(jingle_sip_session, CallID). + Args = [CallID], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). diff --git a/src/jingle_sip/mod_jingle_sip_mnesia.erl b/src/jingle_sip/mod_jingle_sip_mnesia.erl new file mode 100644 index 00000000000..937325c437f --- /dev/null +++ b/src/jingle_sip/mod_jingle_sip_mnesia.erl @@ -0,0 +1,239 @@ +%% @doc Backend module for mod_jingle_sip +%% @author Michal Piotrowski +%% +%%============================================================================== +%% Copyright 2018 Erlang Solutions Ltd. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. 
+%%============================================================================== +-module(mod_jingle_sip_mnesia). +-behaviour(mod_jingle_sip_backend). + +-include("mongoose.hrl"). + +-type call_id() :: binary(). +-type incoming_request() :: {node(), binary()}. +-type outgoing_handle() :: binary(). + +-export([init/2]). +-export([set_incoming_request/5]). +-export([set_incoming_handle/2]). +-export([set_outgoing_request/4]). +-export([set_outgoing_handle/4]). +-export([set_outgoing_accepted/1]). +-export([set_incoming_accepted/1]). +-export([get_incoming_request/2]). +-export([get_outgoing_handle/2]). +-export([get_session_info/2]). +-export([remove_session/1]). + +-ignore_xref([remove_session/1]). + +-record(jingle_sip_session, {sid, %% CallID + dialog, + state, + direction, + request, + node, + owner, + from, + to, + now, + meta}). + +init(_Host, _Opts) -> + mnesia:create_table(jingle_sip_session, + [{ram_copies, [node()]}, + {attributes, record_info(fields, jingle_sip_session)}]). + +-spec set_incoming_request(CallID :: call_id(), ReqID :: binary(), + From :: jid:jid(), To :: jid:jid(), exml:element()) -> + ok | {error, any()}. +set_incoming_request(CallID, ReqID, From, To, JingleEl) -> + TFun = pa:bind(fun set_incoming_request_tr/5, CallID, ReqID, From, To, JingleEl), + run_transaction(TFun). + +set_incoming_request_tr(CallID, ReqID, From, To, JingleEl) -> + Owner = jid:to_lus(To), + case mnesia:wread({jingle_sip_session, CallID}) of + [_] -> + {error, sid_already_exists}; + _ -> + Meta = #{init_stanza => JingleEl}, + Session = #jingle_sip_session{sid = CallID, + request = ReqID, + dialog = undefined, + state = undefined, + direction = in, + node = node(), + from = jid:to_lus(From), + to = Owner, + owner = Owner, + now = os:system_time(microsecond), + meta = Meta}, + mnesia:write(Session) + end. + +-spec set_outgoing_request(CallID :: call_id(), ReqID :: binary(), + From :: jid:jid(), To :: jid:jid()) -> + ok | {error, any()}. 
+set_outgoing_request(CallID, ReqID, From, To) -> + TFun = pa:bind(fun set_outgoing_request_tr/4, CallID, ReqID, From, To), + run_transaction(TFun). + +set_outgoing_request_tr(CallID, ReqID, From, To) -> + Owner = jid:to_lus(From), + case mnesia:wread({jingle_sip_session, CallID}) of + [_] -> + {error, sid_already_exists}; + _ -> + Session = #jingle_sip_session{sid = CallID, + request = ReqID, + dialog = undefined, + state = undefined, + direction = out, + node = node(), + from = Owner, + to = jid:to_lus(To), + owner = Owner, + now = os:system_time(microsecond), + meta = #{}}, + mnesia:write(Session) + end. + +run_transaction(TFun) -> + case mnesia:transaction(TFun) of + {atomic, Result} -> + Result; + {aborted, Reason} -> + {error, Reason} + end. + + +set_incoming_handle(CallID, DialogHandle) -> + TFun = pa:bind(fun set_incoming_handle_tr/2, CallID, DialogHandle), + run_transaction(TFun). + +set_incoming_handle_tr(CallID, DialogHandle) -> + case mnesia:wread({jingle_sip_session, CallID}) of + [#jingle_sip_session{dialog = undefined, direction = in} = Session] -> + Session2 = Session#jingle_sip_session{dialog = DialogHandle, + node = node()}, + mnesia:write(Session2); + [_] -> + {error, incoming_handle_exists}; + _ -> + {error, not_found} + end. + +set_outgoing_handle(CallID, DialogHandle, From, To) -> + TFun = pa:bind(fun set_outgoing_handle_tr/4, CallID, DialogHandle, From, To), + run_transaction(TFun). + +set_outgoing_handle_tr(CallID, DialogHandle, _From, _To) -> + case mnesia:wread({jingle_sip_session, CallID}) of + [#jingle_sip_session{dialog = undefined, direction = out} = Session] -> + Session2 = Session#jingle_sip_session{dialog = DialogHandle, + node = node()}, + mnesia:write(Session2); + [_] -> + {error, outgoing_handle_exists}; + _ -> + Session = #jingle_sip_session{sid = CallID, + dialog = DialogHandle, + node = node(), + direction = out, + state = ringing}, + mnesia:write(Session) + end. 
+ +set_incoming_accepted(CallID) -> + TFun = pa:bind(fun set_incoming_accepted_tr/1, CallID), + run_transaction(TFun). + +set_incoming_accepted_tr(CallID) -> + case mnesia:wread({jingle_sip_session, CallID}) of + [#jingle_sip_session{direction = in, meta = Meta} = Session] -> + MetaWithoutInitStanza = maps:without([init_stanza], Meta), + Session2 = Session#jingle_sip_session{state = accepted, + meta = MetaWithoutInitStanza}, + mnesia:write(Session2); + _ -> + {error, not_found} + end. + +set_outgoing_accepted(CallID) -> + TFun = pa:bind(fun set_outgoing_accepted_tr/1, CallID), + run_transaction(TFun). + +set_outgoing_accepted_tr(CallID) -> + case mnesia:wread({jingle_sip_session, CallID}) of + [#jingle_sip_session{direction = out} = Session] -> + Session2 = Session#jingle_sip_session{state = accepted}, + mnesia:write(Session2); + _ -> + {error, not_found} + end. + +-spec get_incoming_request(call_id(), jid:jid()) -> {ok, undefined | incoming_request()} | + {error, not_found}. +get_incoming_request(CallID, User) -> + UserUS = jid:to_lus(User), + case mnesia:dirty_read(jingle_sip_session, CallID) of + [#jingle_sip_session{request = ReqID, node = Node, owner = UserUS}] -> + {ok, {Node, ReqID}}; + _ -> + {error, not_found} + end. + +-spec get_outgoing_handle(call_id(), jid:jid()) -> {ok, undefined | outgoing_handle()} | + {error, not_found}. +get_outgoing_handle(SID, User) -> + UserUS = jid:to_lus(User), + case mnesia:dirty_read(jingle_sip_session, SID) of + [#jingle_sip_session{dialog = Handle, owner = UserUS}] -> + {ok, Handle}; + _ -> + {error, not_found} + end. + +-spec get_session_info(binary(), jid:jid()) -> + {ok, map()} | {error, any()}. 
+get_session_info(SID, User) -> + UserUS = jid:to_lus(User), + case mnesia:dirty_read(jingle_sip_session, SID) of + [#jingle_sip_session{sid = SID, + dialog = Handle, + request = Request, + state = State, + direction = Dir, + node = ONode, + owner = UserUS, + to = To, + from = From, + meta = Meta}] -> + {ok, #{sid => SID, + dialog => Handle, + request => Request, + state => State, + direction => Dir, + node => ONode, + from => From, + to => To, + meta => Meta}}; + _ -> + {error, not_found} + end. + +remove_session(CallID) -> + mnesia:dirty_delete(jingle_sip_session, CallID). From f3cfacb33833dee1273e2fdaaa5c151285e1437e Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 3 Aug 2023 18:19:39 +0200 Subject: [PATCH 141/161] Update CETS library --- rebar.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rebar.lock b/rebar.lock index 445c07bc131..41a8aeb49be 100644 --- a/rebar.lock +++ b/rebar.lock @@ -8,7 +8,7 @@ {<<"certifi">>,{pkg,<<"certifi">>,<<"2.9.0">>},1}, {<<"cets">>, {git,"https://github.com/esl/cets.git", - {ref,"7d4876fe5285118f5349970ffb20080ea62293db"}}, + {ref,"b4fefc99af7e5a729e47e1edf6453426941da766"}}, 0}, {<<"cowboy">>,{pkg,<<"cowboy">>,<<"2.9.0">>},0}, {<<"cowboy_swagger">>,{pkg,<<"cowboy_swagger">>,<<"2.5.1">>},0}, From 9dec04b130e5916f9030b8a344804ae2e4081661 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 3 Aug 2023 18:20:42 +0200 Subject: [PATCH 142/161] Add mod_jingle_sip_cets Make mod_jingle_sip backend configurable --- big_tests/test.config | 1 + big_tests/tests/jingle_SUITE.erl | 13 +- src/jingle_sip/mod_jingle_sip_backend.erl | 48 ++++- src/jingle_sip/mod_jingle_sip_cets.erl | 232 ++++++++++++++++++++++ src/jingle_sip/mod_jingle_sip_mnesia.erl | 15 +- test/common/config_parser_helper.erl | 2 +- test/config_parser_SUITE.erl | 2 + 7 files changed, 304 insertions(+), 9 deletions(-) create mode 100644 src/jingle_sip/mod_jingle_sip_cets.erl diff --git a/big_tests/test.config b/big_tests/test.config index 
c52100ba542..ba22723b189 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -245,6 +245,7 @@ {s2s_backend, "\"cets\""}, {stream_management_backend, cets}, {muc_online_backend, cets}, + {jingle_sip_backend, cets}, {auth_method, "rdbms"}, {internal_databases, "[internal_databases.cets] cluster_name = \"{{cluster_name}}\" diff --git a/big_tests/tests/jingle_SUITE.erl b/big_tests/tests/jingle_SUITE.erl index c25d61f402d..bfc1698bfc7 100644 --- a/big_tests/tests/jingle_SUITE.erl +++ b/big_tests/tests/jingle_SUITE.erl @@ -59,7 +59,7 @@ init_per_suite(Config) -> case rpc(mim(), application, get_application, [nksip]) of {ok, nksip} -> distributed_helper:add_node_to_cluster(mim2(), Config), - start_nksip_in_mim_nodes(), + start_nksip_in_mim_nodes(Config), application:ensure_all_started(esip), spawn(fun() -> ets:new(jingle_sip_translator, [public, named_table]), ets:new(jingle_sip_translator_bindings, [public, named_table]), @@ -71,9 +71,9 @@ init_per_suite(Config) -> {skip, build_was_not_configured_with_jingle_sip} end. -start_nksip_in_mim_nodes() -> - Pid1 = start_nskip_in_parallel(mim(), #{}), - Pid2 = start_nskip_in_parallel(mim2(), #{listen_port => 12346}), +start_nksip_in_mim_nodes(Config) -> + Pid1 = start_nskip_in_parallel(Config, mim(), #{}), + Pid2 = start_nskip_in_parallel(Config, mim2(), #{listen_port => 12346}), wait_for_process_to_stop(Pid1), wait_for_process_to_stop(Pid2). @@ -85,10 +85,11 @@ wait_for_process_to_stop(Pid) -> ct:fail(wait_for_process_to_stop_timeout) end. 
-start_nskip_in_parallel(NodeSpec, ExtraOpts) -> +start_nskip_in_parallel(Config, NodeSpec, ExtraOpts) -> Domain = domain(), Opts = #{proxy_host => <<"localhost">>, - proxy_port => 12345}, + proxy_port => 12345, + backend => ct_helper:get_preset_var(Config, jingle_sip_backend, mnesia)}, OptsWithExtra = maps:merge(Opts, ExtraOpts), AllOpts = config_parser_helper:mod_config(mod_jingle_sip, OptsWithExtra), RPCSpec = NodeSpec#{timeout => timer:seconds(60)}, diff --git a/src/jingle_sip/mod_jingle_sip_backend.erl b/src/jingle_sip/mod_jingle_sip_backend.erl index 1199aa550a7..6a3f665ca5e 100644 --- a/src/jingle_sip/mod_jingle_sip_backend.erl +++ b/src/jingle_sip/mod_jingle_sip_backend.erl @@ -23,6 +23,7 @@ -type call_id() :: binary(). -type incoming_request() :: {node(), binary()}. -type outgoing_handle() :: binary(). +-type dialog_hangle() :: term(). -export([init/2]). -export([set_incoming_request/5]). @@ -40,6 +41,41 @@ -define(MAIN_MODULE, mod_jingle_sip). +-callback init(mongooseim:host_type(), gen_mod:module_opts()) -> ok. + +-callback set_incoming_request(CallID :: call_id(), ReqID :: binary(), + From :: jid:jid(), To :: jid:jid(), exml:element()) -> + ok | {error, any()}. + +-callback set_outgoing_request(CallID :: call_id(), ReqID :: binary(), + From :: jid:jid(), To :: jid:jid()) -> + ok | {error, any()}. + +-callback set_incoming_handle(CallID :: call_id(), DialogHandle :: dialog_hangle()) -> + ok | {error, any()}. + +-callback set_outgoing_handle(CallID :: call_id(), DialogHandle :: dialog_hangle(), + From :: jid:jid(), To :: jid:jid()) -> + ok | {error, any()}. + +-callback set_incoming_accepted(CallID :: call_id()) -> + ok | {error, any()}. + +-callback set_outgoing_accepted(CallID :: call_id()) -> + ok | {error, any()}. + +-callback get_incoming_request(call_id(), jid:jid()) -> + {ok, undefined | incoming_request()} | {error, not_found}. + +-callback get_outgoing_handle(call_id(), jid:jid()) -> + {ok, undefined | outgoing_handle()} | {error, not_found}. 
+ +-callback get_session_info(call_id(), jid:jid()) -> + {ok, map()} | {error, any()}. + +-callback remove_session(call_id()) -> ok. + +-spec init(mongooseim:host_type(), gen_mod:module_opts()) -> ok. init(Host, Opts) -> Args = [Host, Opts], mongoose_backend:init(global, ?MAIN_MODULE, [], Opts), @@ -59,18 +95,27 @@ set_outgoing_request(CallID, ReqID, From, To) -> Args = [CallID, ReqID, From, To], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec set_incoming_handle(CallID :: call_id(), DialogHandle :: dialog_hangle()) -> + ok | {error, any()}. set_incoming_handle(CallID, DialogHandle) -> Args = [CallID, DialogHandle], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec set_outgoing_handle(CallID :: call_id(), DialogHandle :: dialog_hangle(), + From :: jid:jid(), To :: jid:jid()) -> + ok | {error, any()}. set_outgoing_handle(CallID, DialogHandle, From, To) -> Args = [CallID, DialogHandle, From, To], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec set_incoming_accepted(CallID :: call_id()) -> + ok | {error, any()}. set_incoming_accepted(CallID) -> Args = [CallID], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec set_outgoing_accepted(CallID :: call_id()) -> + ok | {error, any()}. set_outgoing_accepted(CallID) -> Args = [CallID], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). @@ -87,12 +132,13 @@ get_outgoing_handle(SID, User) -> Args = [SID, User], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). --spec get_session_info(binary(), jid:jid()) -> +-spec get_session_info(call_id(), jid:jid()) -> {ok, map()} | {error, any()}. get_session_info(SID, User) -> Args = [SID, User], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec remove_session(call_id()) -> ok. remove_session(CallID) -> Args = [CallID], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). 
diff --git a/src/jingle_sip/mod_jingle_sip_cets.erl b/src/jingle_sip/mod_jingle_sip_cets.erl new file mode 100644 index 00000000000..713f4c87ed8 --- /dev/null +++ b/src/jingle_sip/mod_jingle_sip_cets.erl @@ -0,0 +1,232 @@ +%% @doc Backend module for mod_jingle_sip +%% @author Michal Piotrowski +%% +%%============================================================================== +%% Copyright 2018 Erlang Solutions Ltd. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%============================================================================== +-module(mod_jingle_sip_cets). +-behaviour(mod_jingle_sip_backend). + +-include("mongoose.hrl"). + +-type call_id() :: binary(). +-type incoming_request() :: {node(), binary()}. +-type outgoing_handle() :: binary(). +-type dialog_hangle() :: term(). + +-export([init/2]). +-export([set_incoming_request/5]). +-export([set_incoming_handle/2]). +-export([set_outgoing_request/4]). +-export([set_outgoing_handle/4]). +-export([set_outgoing_accepted/1]). +-export([set_incoming_accepted/1]). +-export([get_incoming_request/2]). +-export([get_outgoing_handle/2]). +-export([get_session_info/2]). +-export([remove_session/1]). + +-ignore_xref([remove_session/1]). + +-record(jingle_sip_session, {sid, %% CallID + dialog, + state, + direction, + request, + node, + owner, + from, + to, + now, + meta}). + +-define(TABLE, cets_jingle_sip_session). + +-spec init(mongooseim:host_type(), gen_mod:module_opts()) -> ok. 
+init(_Host, _Opts) -> + cets:start(?TABLE, #{keypos => 2}), + cets_discovery:add_table(mongoose_cets_discovery, ?TABLE). + +-spec set_incoming_request(CallID :: call_id(), ReqID :: binary(), + From :: jid:jid(), To :: jid:jid(), exml:element()) -> + ok | {error, any()}. +set_incoming_request(CallID, ReqID, From, To, JingleEl) -> + Owner = jid:to_lus(To), + Meta = #{init_stanza => JingleEl}, + Session = #jingle_sip_session{sid = CallID, + request = ReqID, + dialog = undefined, + state = undefined, + direction = in, + node = node(), + from = jid:to_lus(From), + to = Owner, + owner = Owner, + now = os:system_time(microsecond), + meta = Meta}, + write_new_session(CallID, Session). + +-spec set_outgoing_request(CallID :: call_id(), ReqID :: binary(), + From :: jid:jid(), To :: jid:jid()) -> + ok | {error, any()}. +set_outgoing_request(CallID, ReqID, From, To) -> + Owner = jid:to_lus(From), + Session = #jingle_sip_session{sid = CallID, + request = ReqID, + dialog = undefined, + state = undefined, + direction = out, + node = node(), + from = Owner, + to = jid:to_lus(To), + owner = Owner, + now = os:system_time(microsecond), + meta = #{}}, + write_new_session(CallID, Session). + +-spec set_incoming_handle(CallID :: call_id(), DialogHandle :: dialog_hangle()) -> + ok | {error, any()}. +set_incoming_handle(CallID, DialogHandle) -> + case read_session(CallID) of + [#jingle_sip_session{dialog = undefined, direction = in} = Session] -> + Session2 = Session#jingle_sip_session{dialog = DialogHandle, + node = node()}, + update_session(Session2); + [_] -> + {error, incoming_handle_exists}; + _ -> + {error, not_found} + end. + +-spec set_outgoing_handle(CallID :: call_id(), DialogHandle :: dialog_hangle(), + From :: jid:jid(), To :: jid:jid()) -> + ok | {error, any()}. 
+set_outgoing_handle(CallID, DialogHandle, _From, _To) -> + case read_session(CallID) of + [#jingle_sip_session{dialog = undefined, direction = out} = Session] -> + Session2 = Session#jingle_sip_session{dialog = DialogHandle, + node = node()}, + update_session(Session2); + [_] -> + {error, outgoing_handle_exists}; + _ -> + Session = #jingle_sip_session{sid = CallID, + dialog = DialogHandle, + node = node(), + direction = out, + state = ringing}, + cets:insert_new(?TABLE, Session) + end. + +-spec set_incoming_accepted(CallID :: call_id()) -> + ok | {error, any()}. +set_incoming_accepted(CallID) -> + case read_session(CallID) of + [#jingle_sip_session{direction = in, meta = Meta} = Session] -> + MetaWithoutInitStanza = maps:without([init_stanza], Meta), + Session2 = Session#jingle_sip_session{state = accepted, + meta = MetaWithoutInitStanza}, + update_session(Session2); + _ -> + {error, not_found} + end. + +-spec set_outgoing_accepted(CallID :: call_id()) -> + ok | {error, any()}. +set_outgoing_accepted(CallID) -> + case read_session(CallID) of + [#jingle_sip_session{direction = out} = Session] -> + Session2 = Session#jingle_sip_session{state = accepted}, + update_session(Session2); + _ -> + {error, not_found} + end. + +-spec get_incoming_request(call_id(), jid:jid()) -> {ok, undefined | incoming_request()} | + {error, not_found}. +get_incoming_request(CallID, User) -> + UserUS = jid:to_lus(User), + case read_session(CallID) of + [#jingle_sip_session{request = ReqID, node = Node, owner = UserUS}] -> + {ok, {Node, ReqID}}; + _ -> + {error, not_found} + end. + +-spec get_outgoing_handle(call_id(), jid:jid()) -> {ok, undefined | outgoing_handle()} | + {error, not_found}. +get_outgoing_handle(SID, User) -> + UserUS = jid:to_lus(User), + case read_session(SID) of + [#jingle_sip_session{dialog = Handle, owner = UserUS}] -> + {ok, Handle}; + _ -> + {error, not_found} + end. + +-spec get_session_info(binary(), jid:jid()) -> + {ok, map()} | {error, any()}. 
+get_session_info(SID, User) -> + UserUS = jid:to_lus(User), + case read_session(SID) of + [#jingle_sip_session{sid = SID, + dialog = Handle, + request = Request, + state = State, + direction = Dir, + node = ONode, + owner = UserUS, + to = To, + from = From, + meta = Meta}] -> + {ok, #{sid => SID, + dialog => Handle, + request => Request, + state => State, + direction => Dir, + node => ONode, + from => From, + to => To, + meta => Meta}}; + _ -> + {error, not_found} + end. + +-spec remove_session(call_id()) -> ok. +remove_session(CallID) -> + cets:delete(?TABLE, CallID). + +-spec read_session(call_id()) -> [#jingle_sip_session{}]. +read_session(CallID) -> + ets:lookup(?TABLE, CallID). + +-spec write_new_session(call_id(), #jingle_sip_session{}) -> + ok | {error, sid_already_exists}. +write_new_session(CallID, Session) -> + case read_session(CallID) of + [_] -> + {error, sid_already_exists}; + _ -> + case cets:insert_new(?TABLE, Session) of + true -> + ok; + false -> + {error, sid_already_exists} + end + end. + +-spec update_session(#jingle_sip_session{}) -> ok. +update_session(Session) -> + cets:insert(?TABLE, Session). diff --git a/src/jingle_sip/mod_jingle_sip_mnesia.erl b/src/jingle_sip/mod_jingle_sip_mnesia.erl index 937325c437f..22e177c0a72 100644 --- a/src/jingle_sip/mod_jingle_sip_mnesia.erl +++ b/src/jingle_sip/mod_jingle_sip_mnesia.erl @@ -24,6 +24,7 @@ -type call_id() :: binary(). -type incoming_request() :: {node(), binary()}. -type outgoing_handle() :: binary(). +-type dialog_hangle() :: term(). -export([init/2]). -export([set_incoming_request/5]). @@ -51,10 +52,12 @@ now, meta}). +-spec init(mongooseim:host_type(), gen_mod:module_opts()) -> ok. init(_Host, _Opts) -> mnesia:create_table(jingle_sip_session, [{ram_copies, [node()]}, - {attributes, record_info(fields, jingle_sip_session)}]). + {attributes, record_info(fields, jingle_sip_session)}]), + ok. 
-spec set_incoming_request(CallID :: call_id(), ReqID :: binary(), From :: jid:jid(), To :: jid:jid(), exml:element()) -> @@ -120,6 +123,8 @@ run_transaction(TFun) -> end. +-spec set_incoming_handle(CallID :: call_id(), DialogHandle :: dialog_hangle()) -> + ok | {error, any()}. set_incoming_handle(CallID, DialogHandle) -> TFun = pa:bind(fun set_incoming_handle_tr/2, CallID, DialogHandle), run_transaction(TFun). @@ -136,6 +141,9 @@ set_incoming_handle_tr(CallID, DialogHandle) -> {error, not_found} end. +-spec set_outgoing_handle(CallID :: call_id(), DialogHandle :: dialog_hangle(), + From :: jid:jid(), To :: jid:jid()) -> + ok | {error, any()}. set_outgoing_handle(CallID, DialogHandle, From, To) -> TFun = pa:bind(fun set_outgoing_handle_tr/4, CallID, DialogHandle, From, To), run_transaction(TFun). @@ -157,6 +165,8 @@ set_outgoing_handle_tr(CallID, DialogHandle, _From, _To) -> mnesia:write(Session) end. +-spec set_incoming_accepted(CallID :: call_id()) -> + ok | {error, any()}. set_incoming_accepted(CallID) -> TFun = pa:bind(fun set_incoming_accepted_tr/1, CallID), run_transaction(TFun). @@ -172,6 +182,8 @@ set_incoming_accepted_tr(CallID) -> {error, not_found} end. +-spec set_outgoing_accepted(CallID :: call_id()) -> + ok | {error, any()}. set_outgoing_accepted(CallID) -> TFun = pa:bind(fun set_outgoing_accepted_tr/1, CallID), run_transaction(TFun). @@ -235,5 +247,6 @@ get_session_info(SID, User) -> {error, not_found} end. +-spec remove_session(call_id()) -> ok. remove_session(CallID) -> mnesia:dirty_delete(jingle_sip_session, CallID). 
diff --git a/test/common/config_parser_helper.erl b/test/common/config_parser_helper.erl index e11da3c0c98..b4115becad1 100644 --- a/test/common/config_parser_helper.erl +++ b/test/common/config_parser_helper.erl @@ -883,7 +883,7 @@ default_mod_config(mod_inbox) -> max_result_limit => infinity}; default_mod_config(mod_jingle_sip) -> #{proxy_host => "localhost", proxy_port => 5060, listen_port => 5600, local_host => "localhost", - sdp_origin => "127.0.0.1", transport => "udp", username_to_phone => []}; + sdp_origin => "127.0.0.1", transport => "udp", username_to_phone => [], backend => mnesia}; default_mod_config(mod_keystore) -> #{ram_key_size => 2048, keys => #{}}; default_mod_config(mod_last) -> diff --git a/test/config_parser_SUITE.erl b/test/config_parser_SUITE.erl index afef8b760c4..c1f5fc79a04 100644 --- a/test/config_parser_SUITE.erl +++ b/test/config_parser_SUITE.erl @@ -1905,6 +1905,7 @@ mod_jingle_sip(_Config) -> check_module_defaults(mod_jingle_sip), T = fun(Opts) -> #{<<"modules">> => #{<<"mod_jingle_sip">> => Opts}} end, P = [modules, mod_jingle_sip], + ?cfgh(P ++ [backend], mnesia, T(#{<<"backend">> => <<"mnesia">>})), ?cfgh(P ++ [proxy_host], "proxxxy.com", T(#{<<"proxy_host">> => <<"proxxxy.com">>})), ?cfgh(P ++ [proxy_port], 5601, @@ -1920,6 +1921,7 @@ mod_jingle_sip(_Config) -> ?cfgh(P ++ [username_to_phone], [{<<"2000006168">>, <<"+919177074440">>}], T(#{<<"username_to_phone">> => [#{<<"username">> => <<"2000006168">>, <<"phone">> => <<"+919177074440">>}]})), + ?errh(T(#{<<"backend">> => <<"amnesia">>})), ?errh(T(#{<<"proxy_host">> => 1})), ?errh(T(#{<<"proxy_port">> => 1000000})), ?errh(T(#{<<"listen_port">> => -1})), From 1609279b5a1b69cb175e931fbd1d78bb4e3c3b8c Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Thu, 3 Aug 2023 18:22:42 +0200 Subject: [PATCH 143/161] Add backend option into docs for mod_jingle_sip --- doc/modules/mod_jingle_sip.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/doc/modules/mod_jingle_sip.md 
b/doc/modules/mod_jingle_sip.md index fcf093eb90e..5100e5b642c 100644 --- a/doc/modules/mod_jingle_sip.md +++ b/doc/modules/mod_jingle_sip.md @@ -102,6 +102,13 @@ MongooseIM packages are built with Jingle/SIP support. ## Options +### `modules.mod_jingle_sip.backend` +* **Syntax:** string, `"mnesia"`, `"cets"` +* **Default:** `"mnesia"` +* **Example:** `backend = "cets"` + +Backend for in-memory data for this module. + ### `modules.mod_jingle_sip.proxy_host` * **Syntax:** string * **Default:** `"localhost"` From eac044b1a09c446fee48291f52a5b16573abf318 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 4 Aug 2023 12:18:49 +0200 Subject: [PATCH 144/161] Add mod_jingle_sip_session Add only basic functions to mod_jingle_sip_backend Define record type for jingle_sip_session --- include/mod_jingle_sip_session.hrl | 15 ++ src/jingle_sip/jingle_sip_callbacks.erl | 14 +- src/jingle_sip/mod_jingle_sip.erl | 14 +- src/jingle_sip/mod_jingle_sip_backend.erl | 134 ++---------- src/jingle_sip/mod_jingle_sip_cets.erl | 230 +++----------------- src/jingle_sip/mod_jingle_sip_mnesia.erl | 243 +++------------------- src/jingle_sip/mod_jingle_sip_session.erl | 195 +++++++++++++++++ 7 files changed, 304 insertions(+), 541 deletions(-) create mode 100644 include/mod_jingle_sip_session.hrl create mode 100644 src/jingle_sip/mod_jingle_sip_session.erl diff --git a/include/mod_jingle_sip_session.hrl b/include/mod_jingle_sip_session.hrl new file mode 100644 index 00000000000..ed16350cdc7 --- /dev/null +++ b/include/mod_jingle_sip_session.hrl @@ -0,0 +1,15 @@ +%% Defineds record to store SIP session information +%% Type is in mod_jingle_sip_session:session() +-record(jingle_sip_session, { + %% SIP CallID + sid, + dialog, + state, + direction, + request, + node, + owner, + from, + to, + now, + meta}). 
diff --git a/src/jingle_sip/jingle_sip_callbacks.erl b/src/jingle_sip/jingle_sip_callbacks.erl index 09b742f721c..73dec3b9f77 100644 --- a/src/jingle_sip/jingle_sip_callbacks.erl +++ b/src/jingle_sip/jingle_sip_callbacks.erl @@ -102,7 +102,7 @@ translate_and_deliver_invite(Req, FromJID, FromBinary, ToJID, ToBinary) -> JingleEl = jingle_sip_helper:jingle_element(CallID, <<"session-initiate">>, ContentEls ++ OtherEls), - ok = mod_jingle_sip_backend:set_incoming_request(CallID, ReqID, FromJID, ToJID, JingleEl), + ok = mod_jingle_sip_session:set_incoming_request(CallID, ReqID, FromJID, ToJID, JingleEl), ?LOG_INFO(#{what => sip_invite, text => <<"Got SIP INVITE from NkSIP">>, from_jid => FromBinary, to_jid => ToBinary, @@ -176,7 +176,7 @@ sip_bye(Req, _Call) -> from_jid => FromJID, to_jid => ToJID }), maybe_route_to_all_sessions(FromJID, ToJID, Acc, IQEl), - ok = mod_jingle_sip_backend:remove_session(CallID), + ok = mod_jingle_sip_session:remove_session(CallID), {reply, ok}. sip_cancel(_InviteReq, Req, _Call) -> @@ -194,7 +194,7 @@ sip_cancel(_InviteReq, Req, _Call) -> from_jid => FromJID, to_jid => ToJID }), maybe_route_to_all_sessions(FromJID, ToJID, Acc, IQEl), - ok = mod_jingle_sip_backend:remove_session(CallID), + ok = mod_jingle_sip_session:remove_session(CallID), {reply, ok}. 
sip_dialog_update(start, Dialog, Call) -> @@ -203,7 +203,7 @@ sip_dialog_update(start, Dialog, Call) -> case Transaction#trans.class of uas -> {ok, CallID} = nksip_dialog:call_id(Dialog), - mod_jingle_sip_backend:set_incoming_handle(CallID, DialogHandle); + mod_jingle_sip_session:set_incoming_handle(CallID, DialogHandle); _ -> ok @@ -247,7 +247,7 @@ invite_resp_callback({resp, 200, SIPMsg, _Call}) -> element => IQEl, from_jid => FromJID, to_jid => ToJID }), - ok = mod_jingle_sip_backend:set_outgoing_accepted(CallID), + ok = mod_jingle_sip_session:set_outgoing_accepted(CallID), maybe_route_to_all_sessions(FromJID, ToJID, Acc, IQEl), ok; invite_resp_callback({resp, 487, _SIPMsg, _Call}) -> @@ -286,7 +286,7 @@ invite_resp_callback({resp, ErrorCode, SIPMsg, _Call}) from_jid => FromJID, to_jid => ToJID }), maybe_route_to_all_sessions(FromJID, ToJID, Acc, IQEl), - ok = mod_jingle_sip_backend:remove_session(CallID), + ok = mod_jingle_sip_session:remove_session(CallID), ok; invite_resp_callback(Data) -> ?LOG_ERROR(#{what => sip_unknown_response, sip_data => Data}). 
@@ -304,7 +304,7 @@ send_ringing_session_info(SIPMsg, ErrorCode) -> dialog_id => DialogId, server_id => SrvId, from_jid => FromBinary, to_binary => ToBinary}), - mod_jingle_sip_backend:set_outgoing_handle(CallID, DialogHandle, FromJID, ToJID), + mod_jingle_sip_session:set_outgoing_handle(CallID, DialogHandle, FromJID, ToJID), RingingEl = #xmlel{name = <<"ringing">>, attrs = [{<<"xmlns">>, <<"urn:xmpp:jingle:apps:rtp:info:1">>}]}, diff --git a/src/jingle_sip/mod_jingle_sip.erl b/src/jingle_sip/mod_jingle_sip.erl index ef3d20378b0..7751caef50d 100644 --- a/src/jingle_sip/mod_jingle_sip.erl +++ b/src/jingle_sip/mod_jingle_sip.erl @@ -209,7 +209,7 @@ resend_session_initiate(#iq{sub_el = Jingle} = IQ, Acc) -> From = mongoose_acc:from_jid(Acc), To = mongoose_acc:to_jid(Acc), SID = exml_query:attr(Jingle, <<"sid">>), - case mod_jingle_sip_backend:get_session_info(SID, From) of + case mod_jingle_sip_session:get_session_info(SID, From) of {ok, Session} -> maybe_resend_session_initiate(From, To, IQ, Acc, Session); _ -> @@ -250,7 +250,7 @@ translate_to_sip(<<"session-initiate">>, Jingle, Acc) -> %% Internal options async, {callback, fun jingle_sip_callbacks:invite_resp_callback/1}]), - Result = mod_jingle_sip_backend:set_outgoing_request(SID, Handle, FromJID, ToJID), + Result = mod_jingle_sip_session:set_outgoing_request(SID, Handle, FromJID, ToJID), {_, SrvId, DialogId, _CallId} = nksip_sipmsg:parse_handle(Handle), ?LOG_INFO(#{what => sip_session_start, text => <<"Start SIP session with set_outgoing_request call">>, @@ -261,7 +261,7 @@ translate_to_sip(<<"session-initiate">>, Jingle, Acc) -> translate_to_sip(<<"session-accept">>, Jingle, Acc) -> LServer = mongoose_acc:lserver(Acc), SID = exml_query:attr(Jingle, <<"sid">>), - case mod_jingle_sip_backend:get_incoming_request(SID, mongoose_acc:get(c2s, origin_jid, Acc)) of + case mod_jingle_sip_session:get_incoming_request(SID, mongoose_acc:get(c2s, origin_jid, Acc)) of {ok, ReqID} -> try_to_accept_session(ReqID, Jingle, Acc, 
LServer, SID); _ -> @@ -276,7 +276,7 @@ translate_to_sip(<<"source-update">> = Name, Jingle, Acc) -> translate_to_sip(<<"transport-info">>, Jingle, Acc) -> SID = exml_query:attr(Jingle, <<"sid">>), SDP = make_sdp_for_ice_candidate(Jingle), - case mod_jingle_sip_backend:get_outgoing_handle(SID, mongoose_acc:get(c2s, origin_jid, Acc)) of + case mod_jingle_sip_session:get_outgoing_handle(SID, mongoose_acc:get(c2s, origin_jid, Acc)) of {ok, undefined} -> ?LOG_ERROR(#{what => sip_missing_dialog, sid => SID, acc => Acc}), {error, item_not_found}; @@ -293,7 +293,7 @@ translate_to_sip(<<"session-terminate">>, Jingle, Acc) -> From = mongoose_acc:get(c2s, origin_jid, Acc), FromLUS = jid:to_lus(From), ToLUS = jid:to_lus(ToJID), - case mod_jingle_sip_backend:get_session_info(SID, From) of + case mod_jingle_sip_session:get_session_info(SID, From) of {ok, Session} -> try_to_terminate_the_session(FromLUS, ToLUS, Session); _ -> @@ -303,7 +303,7 @@ translate_to_sip(<<"session-terminate">>, Jingle, Acc) -> translate_source_change_to_sip(ActionName, Jingle, Acc) -> SID = exml_query:attr(Jingle, <<"sid">>), SDP = get_spd(ActionName, Jingle, Acc), - case mod_jingle_sip_backend:get_outgoing_handle(SID, mongoose_acc:get(c2s, origin_jid, Acc)) of + case mod_jingle_sip_session:get_outgoing_handle(SID, mongoose_acc:get(c2s, origin_jid, Acc)) of {ok, undefined} -> ?LOG_ERROR(#{what => sip_missing_dialod, sid => SID, acc => Acc}), {error, item_not_found}; @@ -345,7 +345,7 @@ try_to_accept_session(ReqID, Jingle, Acc, Server, SID) -> LocalHost = gen_mod:get_module_opt(Server, ?MODULE, local_host), case nksip_request_reply({ok, [{body, SDP}, {local_host, LocalHost}]}, ReqID) of ok -> - ok = mod_jingle_sip_backend:set_incoming_accepted(SID), + ok = mod_jingle_sip_session:set_incoming_accepted(SID), terminate_session_on_other_devices(SID, Acc), ok; Other -> diff --git a/src/jingle_sip/mod_jingle_sip_backend.erl b/src/jingle_sip/mod_jingle_sip_backend.erl index 6a3f665ca5e..a4c61354798 100644 --- 
a/src/jingle_sip/mod_jingle_sip_backend.erl +++ b/src/jingle_sip/mod_jingle_sip_backend.erl @@ -1,77 +1,30 @@ %% @doc Backend module for mod_jingle_sip -%% @author Michal Piotrowski -%% -%%============================================================================== -%% Copyright 2018 Erlang Solutions Ltd. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%============================================================================== -module(mod_jingle_sip_backend). --include("mongoose.hrl"). - --type call_id() :: binary(). --type incoming_request() :: {node(), binary()}. --type outgoing_handle() :: binary(). --type dialog_hangle() :: term(). - -export([init/2]). --export([set_incoming_request/5]). --export([set_incoming_handle/2]). --export([set_outgoing_request/4]). --export([set_outgoing_handle/4]). --export([set_outgoing_accepted/1]). --export([set_incoming_accepted/1]). --export([get_incoming_request/2]). --export([get_outgoing_handle/2]). --export([get_session_info/2]). -export([remove_session/1]). +-export([init/2]). +-export([read_session/1]). +-export([write_new_session/2]). +-export([update_session/2]). +-export([remove_session/1]). + +-include("mongoose.hrl"). --ignore_xref([remove_session/1]). +-type call_id() :: mod_jingle_sip_session:call_id(). +-type session() :: mod_jingle_sip_session:session(). +-type update_fun() :: mod_jingle_sip_session:update_fun(). -define(MAIN_MODULE, mod_jingle_sip). 
-callback init(mongooseim:host_type(), gen_mod:module_opts()) -> ok. --callback set_incoming_request(CallID :: call_id(), ReqID :: binary(), - From :: jid:jid(), To :: jid:jid(), exml:element()) -> - ok | {error, any()}. - --callback set_outgoing_request(CallID :: call_id(), ReqID :: binary(), - From :: jid:jid(), To :: jid:jid()) -> - ok | {error, any()}. - --callback set_incoming_handle(CallID :: call_id(), DialogHandle :: dialog_hangle()) -> - ok | {error, any()}. - --callback set_outgoing_handle(CallID :: call_id(), DialogHandle :: dialog_hangle(), - From :: jid:jid(), To :: jid:jid()) -> - ok | {error, any()}. - --callback set_incoming_accepted(CallID :: call_id()) -> - ok | {error, any()}. - --callback set_outgoing_accepted(CallID :: call_id()) -> - ok | {error, any()}. - --callback get_incoming_request(call_id(), jid:jid()) -> - {ok, undefined | incoming_request()} | {error, not_found}. +-callback read_session(call_id()) -> [session()]. --callback get_outgoing_handle(call_id(), jid:jid()) -> - {ok, undefined | outgoing_handle()} | {error, not_found}. +-callback write_new_session(call_id(), session()) -> + ok | {error, conflict}. --callback get_session_info(call_id(), jid:jid()) -> - {ok, map()} | {error, any()}. +-callback update_session(call_id(), update_fun()) -> ok | {error, _}. -callback remove_session(call_id()) -> ok. @@ -81,61 +34,20 @@ init(Host, Opts) -> mongoose_backend:init(global, ?MAIN_MODULE, [], Opts), mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). --spec set_incoming_request(CallID :: call_id(), ReqID :: binary(), - From :: jid:jid(), To :: jid:jid(), exml:element()) -> - ok | {error, any()}. -set_incoming_request(CallID, ReqID, From, To, JingleEl) -> - Args = [CallID, ReqID, From, To, JingleEl], - mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). - --spec set_outgoing_request(CallID :: call_id(), ReqID :: binary(), - From :: jid:jid(), To :: jid:jid()) -> - ok | {error, any()}. 
-set_outgoing_request(CallID, ReqID, From, To) -> - Args = [CallID, ReqID, From, To], - mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). - --spec set_incoming_handle(CallID :: call_id(), DialogHandle :: dialog_hangle()) -> - ok | {error, any()}. -set_incoming_handle(CallID, DialogHandle) -> - Args = [CallID, DialogHandle], - mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). - --spec set_outgoing_handle(CallID :: call_id(), DialogHandle :: dialog_hangle(), - From :: jid:jid(), To :: jid:jid()) -> - ok | {error, any()}. -set_outgoing_handle(CallID, DialogHandle, From, To) -> - Args = [CallID, DialogHandle, From, To], - mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). - --spec set_incoming_accepted(CallID :: call_id()) -> - ok | {error, any()}. -set_incoming_accepted(CallID) -> - Args = [CallID], - mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). - --spec set_outgoing_accepted(CallID :: call_id()) -> - ok | {error, any()}. -set_outgoing_accepted(CallID) -> +-spec read_session(call_id()) -> [session()]. +read_session(CallID) -> Args = [CallID], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). --spec get_incoming_request(call_id(), jid:jid()) -> {ok, undefined | incoming_request()} | - {error, not_found}. -get_incoming_request(CallID, User) -> - Args = [CallID, User], - mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). - --spec get_outgoing_handle(call_id(), jid:jid()) -> {ok, undefined | outgoing_handle()} | - {error, not_found}. -get_outgoing_handle(SID, User) -> - Args = [SID, User], +-spec write_new_session(call_id(), session()) -> + ok | {error, conflict}. +write_new_session(CallID, Session) -> + Args = [CallID, Session], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). --spec get_session_info(call_id(), jid:jid()) -> - {ok, map()} | {error, any()}. 
-get_session_info(SID, User) -> - Args = [SID, User], +-spec update_session(call_id(), update_fun()) -> ok | {error, _}. +update_session(CallID, F) -> + Args = [CallID, F], mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). -spec remove_session(call_id()) -> ok. diff --git a/src/jingle_sip/mod_jingle_sip_cets.erl b/src/jingle_sip/mod_jingle_sip_cets.erl index 713f4c87ed8..d9c255a291a 100644 --- a/src/jingle_sip/mod_jingle_sip_cets.erl +++ b/src/jingle_sip/mod_jingle_sip_cets.erl @@ -1,232 +1,58 @@ -%% @doc Backend module for mod_jingle_sip -%% @author Michal Piotrowski -%% -%%============================================================================== -%% Copyright 2018 Erlang Solutions Ltd. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%============================================================================== +%% @doc Backend module for mod_jingle_sip for CETS backend -module(mod_jingle_sip_cets). -behaviour(mod_jingle_sip_backend). -include("mongoose.hrl"). --type call_id() :: binary(). --type incoming_request() :: {node(), binary()}. --type outgoing_handle() :: binary(). --type dialog_hangle() :: term(). - -export([init/2]). --export([set_incoming_request/5]). --export([set_incoming_handle/2]). --export([set_outgoing_request/4]). --export([set_outgoing_handle/4]). --export([set_outgoing_accepted/1]). --export([set_incoming_accepted/1]). --export([get_incoming_request/2]). --export([get_outgoing_handle/2]). 
--export([get_session_info/2]). +-export([read_session/1]). +-export([write_new_session/2]). +-export([update_session/2]). -export([remove_session/1]). --ignore_xref([remove_session/1]). - --record(jingle_sip_session, {sid, %% CallID - dialog, - state, - direction, - request, - node, - owner, - from, - to, - now, - meta}). +-type call_id() :: mod_jingle_sip_session:call_id(). +-type session() :: mod_jingle_sip_session:session(). +-type update_fun() :: mod_jingle_sip_session:update_fun(). -define(TABLE, cets_jingle_sip_session). -spec init(mongooseim:host_type(), gen_mod:module_opts()) -> ok. init(_Host, _Opts) -> + %% We store Erlang records, so keypos is 2 cets:start(?TABLE, #{keypos => 2}), cets_discovery:add_table(mongoose_cets_discovery, ?TABLE). --spec set_incoming_request(CallID :: call_id(), ReqID :: binary(), - From :: jid:jid(), To :: jid:jid(), exml:element()) -> - ok | {error, any()}. -set_incoming_request(CallID, ReqID, From, To, JingleEl) -> - Owner = jid:to_lus(To), - Meta = #{init_stanza => JingleEl}, - Session = #jingle_sip_session{sid = CallID, - request = ReqID, - dialog = undefined, - state = undefined, - direction = in, - node = node(), - from = jid:to_lus(From), - to = Owner, - owner = Owner, - now = os:system_time(microsecond), - meta = Meta}, - write_new_session(CallID, Session). - --spec set_outgoing_request(CallID :: call_id(), ReqID :: binary(), - From :: jid:jid(), To :: jid:jid()) -> - ok | {error, any()}. -set_outgoing_request(CallID, ReqID, From, To) -> - Owner = jid:to_lus(From), - Session = #jingle_sip_session{sid = CallID, - request = ReqID, - dialog = undefined, - state = undefined, - direction = out, - node = node(), - from = Owner, - to = jid:to_lus(To), - owner = Owner, - now = os:system_time(microsecond), - meta = #{}}, - write_new_session(CallID, Session). - --spec set_incoming_handle(CallID :: call_id(), DialogHandle :: dialog_hangle()) -> - ok | {error, any()}. 
-set_incoming_handle(CallID, DialogHandle) -> - case read_session(CallID) of - [#jingle_sip_session{dialog = undefined, direction = in} = Session] -> - Session2 = Session#jingle_sip_session{dialog = DialogHandle, - node = node()}, - update_session(Session2); - [_] -> - {error, incoming_handle_exists}; - _ -> - {error, not_found} - end. - --spec set_outgoing_handle(CallID :: call_id(), DialogHandle :: dialog_hangle(), - From :: jid:jid(), To :: jid:jid()) -> - ok | {error, any()}. -set_outgoing_handle(CallID, DialogHandle, _From, _To) -> - case read_session(CallID) of - [#jingle_sip_session{dialog = undefined, direction = out} = Session] -> - Session2 = Session#jingle_sip_session{dialog = DialogHandle, - node = node()}, - update_session(Session2); - [_] -> - {error, outgoing_handle_exists}; - _ -> - Session = #jingle_sip_session{sid = CallID, - dialog = DialogHandle, - node = node(), - direction = out, - state = ringing}, - cets:insert_new(?TABLE, Session) - end. - --spec set_incoming_accepted(CallID :: call_id()) -> - ok | {error, any()}. -set_incoming_accepted(CallID) -> - case read_session(CallID) of - [#jingle_sip_session{direction = in, meta = Meta} = Session] -> - MetaWithoutInitStanza = maps:without([init_stanza], Meta), - Session2 = Session#jingle_sip_session{state = accepted, - meta = MetaWithoutInitStanza}, - update_session(Session2); - _ -> - {error, not_found} - end. - --spec set_outgoing_accepted(CallID :: call_id()) -> - ok | {error, any()}. -set_outgoing_accepted(CallID) -> - case read_session(CallID) of - [#jingle_sip_session{direction = out} = Session] -> - Session2 = Session#jingle_sip_session{state = accepted}, - update_session(Session2); - _ -> - {error, not_found} - end. - --spec get_incoming_request(call_id(), jid:jid()) -> {ok, undefined | incoming_request()} | - {error, not_found}. 
-get_incoming_request(CallID, User) -> - UserUS = jid:to_lus(User), - case read_session(CallID) of - [#jingle_sip_session{request = ReqID, node = Node, owner = UserUS}] -> - {ok, {Node, ReqID}}; - _ -> - {error, not_found} - end. - --spec get_outgoing_handle(call_id(), jid:jid()) -> {ok, undefined | outgoing_handle()} | - {error, not_found}. -get_outgoing_handle(SID, User) -> - UserUS = jid:to_lus(User), - case read_session(SID) of - [#jingle_sip_session{dialog = Handle, owner = UserUS}] -> - {ok, Handle}; - _ -> - {error, not_found} - end. - --spec get_session_info(binary(), jid:jid()) -> - {ok, map()} | {error, any()}. -get_session_info(SID, User) -> - UserUS = jid:to_lus(User), - case read_session(SID) of - [#jingle_sip_session{sid = SID, - dialog = Handle, - request = Request, - state = State, - direction = Dir, - node = ONode, - owner = UserUS, - to = To, - from = From, - meta = Meta}] -> - {ok, #{sid => SID, - dialog => Handle, - request => Request, - state => State, - direction => Dir, - node => ONode, - from => From, - to => To, - meta => Meta}}; - _ -> - {error, not_found} - end. - --spec remove_session(call_id()) -> ok. -remove_session(CallID) -> - cets:delete(?TABLE, CallID). - --spec read_session(call_id()) -> [#jingle_sip_session{}]. +-spec read_session(call_id()) -> [session()]. read_session(CallID) -> ets:lookup(?TABLE, CallID). --spec write_new_session(call_id(), #jingle_sip_session{}) -> - ok | {error, sid_already_exists}. +-spec write_new_session(call_id(), session()) -> + ok | {error, conflict}. write_new_session(CallID, Session) -> case read_session(CallID) of [_] -> - {error, sid_already_exists}; + {error, conflict}; _ -> case cets:insert_new(?TABLE, Session) of true -> ok; false -> - {error, sid_already_exists} + {error, conflict} end end. --spec update_session(#jingle_sip_session{}) -> ok. -update_session(Session) -> - cets:insert(?TABLE, Session). +-spec update_session(call_id(), update_fun()) -> ok | {error, _}. 
+update_session(CallID, F) -> + case read_session(CallID) of + [Session] -> + case F(Session) of + {error, _} = Err -> Err; + Session2 -> cets:insert(?TABLE, Session2) + end; + _ -> + {error, not_found} + end. + +-spec remove_session(call_id()) -> ok. +remove_session(CallID) -> + cets:delete(?TABLE, CallID). diff --git a/src/jingle_sip/mod_jingle_sip_mnesia.erl b/src/jingle_sip/mod_jingle_sip_mnesia.erl index 22e177c0a72..bd3a96e9d14 100644 --- a/src/jingle_sip/mod_jingle_sip_mnesia.erl +++ b/src/jingle_sip/mod_jingle_sip_mnesia.erl @@ -1,56 +1,19 @@ -%% @doc Backend module for mod_jingle_sip -%% @author Michal Piotrowski -%% -%%============================================================================== -%% Copyright 2018 Erlang Solutions Ltd. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%============================================================================== +%% Mnesia backend module for mod_jingle_sip module -module(mod_jingle_sip_mnesia). -behaviour(mod_jingle_sip_backend). -include("mongoose.hrl"). - --type call_id() :: binary(). --type incoming_request() :: {node(), binary()}. --type outgoing_handle() :: binary(). --type dialog_hangle() :: term(). +-include("mod_jingle_sip_session.hrl"). -export([init/2]). --export([set_incoming_request/5]). --export([set_incoming_handle/2]). --export([set_outgoing_request/4]). --export([set_outgoing_handle/4]). --export([set_outgoing_accepted/1]). --export([set_incoming_accepted/1]). 
--export([get_incoming_request/2]). --export([get_outgoing_handle/2]). --export([get_session_info/2]). +-export([read_session/1]). +-export([write_new_session/2]). +-export([update_session/2]). -export([remove_session/1]). --ignore_xref([remove_session/1]). - --record(jingle_sip_session, {sid, %% CallID - dialog, - state, - direction, - request, - node, - owner, - from, - to, - now, - meta}). +-type call_id() :: mod_jingle_sip_session:call_id(). +-type session() :: mod_jingle_sip_session:session(). +-type update_fun() :: mod_jingle_sip_session:update_fun(). -spec init(mongooseim:host_type(), gen_mod:module_opts()) -> ok. init(_Host, _Opts) -> @@ -59,59 +22,36 @@ init(_Host, _Opts) -> {attributes, record_info(fields, jingle_sip_session)}]), ok. --spec set_incoming_request(CallID :: call_id(), ReqID :: binary(), - From :: jid:jid(), To :: jid:jid(), exml:element()) -> - ok | {error, any()}. -set_incoming_request(CallID, ReqID, From, To, JingleEl) -> - TFun = pa:bind(fun set_incoming_request_tr/5, CallID, ReqID, From, To, JingleEl), - run_transaction(TFun). +-spec read_session(call_id()) -> [session()]. +read_session(CallID) -> + mnesia:dirty_read(jingle_sip_session, CallID). + +-spec write_new_session(call_id(), session()) -> + ok | {error, conflict}. +write_new_session(CallID, Session) -> + run_transaction(fun() -> write_new_session_tr(CallID, Session) end). -set_incoming_request_tr(CallID, ReqID, From, To, JingleEl) -> - Owner = jid:to_lus(To), +write_new_session_tr(CallID, Session) -> case mnesia:wread({jingle_sip_session, CallID}) of [_] -> - {error, sid_already_exists}; + {error, conflict}; _ -> - Meta = #{init_stanza => JingleEl}, - Session = #jingle_sip_session{sid = CallID, - request = ReqID, - dialog = undefined, - state = undefined, - direction = in, - node = node(), - from = jid:to_lus(From), - to = Owner, - owner = Owner, - now = os:system_time(microsecond), - meta = Meta}, mnesia:write(Session) end. 
--spec set_outgoing_request(CallID :: call_id(), ReqID :: binary(), - From :: jid:jid(), To :: jid:jid()) -> - ok | {error, any()}. -set_outgoing_request(CallID, ReqID, From, To) -> - TFun = pa:bind(fun set_outgoing_request_tr/4, CallID, ReqID, From, To), - run_transaction(TFun). +-spec update_session(call_id(), update_fun()) -> ok | {error, _}. +update_session(CallID, F) -> + run_transaction(fun() -> update_session_tr(CallID, F) end). -set_outgoing_request_tr(CallID, ReqID, From, To) -> - Owner = jid:to_lus(From), +update_session_tr(CallID, F) -> case mnesia:wread({jingle_sip_session, CallID}) of - [_] -> - {error, sid_already_exists}; - _ -> - Session = #jingle_sip_session{sid = CallID, - request = ReqID, - dialog = undefined, - state = undefined, - direction = out, - node = node(), - from = Owner, - to = jid:to_lus(To), - owner = Owner, - now = os:system_time(microsecond), - meta = #{}}, - mnesia:write(Session) + [Session] -> + case F(Session) of + {error, _} = Err -> Err; + Session2 -> mnesia:write(Session2) + end; + _ -> + {error, not_found} end. run_transaction(TFun) -> @@ -122,131 +62,6 @@ run_transaction(TFun) -> {error, Reason} end. - --spec set_incoming_handle(CallID :: call_id(), DialogHandle :: dialog_hangle()) -> - ok | {error, any()}. -set_incoming_handle(CallID, DialogHandle) -> - TFun = pa:bind(fun set_incoming_handle_tr/2, CallID, DialogHandle), - run_transaction(TFun). - -set_incoming_handle_tr(CallID, DialogHandle) -> - case mnesia:wread({jingle_sip_session, CallID}) of - [#jingle_sip_session{dialog = undefined, direction = in} = Session] -> - Session2 = Session#jingle_sip_session{dialog = DialogHandle, - node = node()}, - mnesia:write(Session2); - [_] -> - {error, incoming_handle_exists}; - _ -> - {error, not_found} - end. - --spec set_outgoing_handle(CallID :: call_id(), DialogHandle :: dialog_hangle(), - From :: jid:jid(), To :: jid:jid()) -> - ok | {error, any()}. 
-set_outgoing_handle(CallID, DialogHandle, From, To) -> - TFun = pa:bind(fun set_outgoing_handle_tr/4, CallID, DialogHandle, From, To), - run_transaction(TFun). - -set_outgoing_handle_tr(CallID, DialogHandle, _From, _To) -> - case mnesia:wread({jingle_sip_session, CallID}) of - [#jingle_sip_session{dialog = undefined, direction = out} = Session] -> - Session2 = Session#jingle_sip_session{dialog = DialogHandle, - node = node()}, - mnesia:write(Session2); - [_] -> - {error, outgoing_handle_exists}; - _ -> - Session = #jingle_sip_session{sid = CallID, - dialog = DialogHandle, - node = node(), - direction = out, - state = ringing}, - mnesia:write(Session) - end. - --spec set_incoming_accepted(CallID :: call_id()) -> - ok | {error, any()}. -set_incoming_accepted(CallID) -> - TFun = pa:bind(fun set_incoming_accepted_tr/1, CallID), - run_transaction(TFun). - -set_incoming_accepted_tr(CallID) -> - case mnesia:wread({jingle_sip_session, CallID}) of - [#jingle_sip_session{direction = in, meta = Meta} = Session] -> - MetaWithoutInitStanza = maps:without([init_stanza], Meta), - Session2 = Session#jingle_sip_session{state = accepted, - meta = MetaWithoutInitStanza}, - mnesia:write(Session2); - _ -> - {error, not_found} - end. - --spec set_outgoing_accepted(CallID :: call_id()) -> - ok | {error, any()}. -set_outgoing_accepted(CallID) -> - TFun = pa:bind(fun set_outgoing_accepted_tr/1, CallID), - run_transaction(TFun). - -set_outgoing_accepted_tr(CallID) -> - case mnesia:wread({jingle_sip_session, CallID}) of - [#jingle_sip_session{direction = out} = Session] -> - Session2 = Session#jingle_sip_session{state = accepted}, - mnesia:write(Session2); - _ -> - {error, not_found} - end. - --spec get_incoming_request(call_id(), jid:jid()) -> {ok, undefined | incoming_request()} | - {error, not_found}. 
-get_incoming_request(CallID, User) -> - UserUS = jid:to_lus(User), - case mnesia:dirty_read(jingle_sip_session, CallID) of - [#jingle_sip_session{request = ReqID, node = Node, owner = UserUS}] -> - {ok, {Node, ReqID}}; - _ -> - {error, not_found} - end. - --spec get_outgoing_handle(call_id(), jid:jid()) -> {ok, undefined | outgoing_handle()} | - {error, not_found}. -get_outgoing_handle(SID, User) -> - UserUS = jid:to_lus(User), - case mnesia:dirty_read(jingle_sip_session, SID) of - [#jingle_sip_session{dialog = Handle, owner = UserUS}] -> - {ok, Handle}; - _ -> - {error, not_found} - end. - --spec get_session_info(binary(), jid:jid()) -> - {ok, map()} | {error, any()}. -get_session_info(SID, User) -> - UserUS = jid:to_lus(User), - case mnesia:dirty_read(jingle_sip_session, SID) of - [#jingle_sip_session{sid = SID, - dialog = Handle, - request = Request, - state = State, - direction = Dir, - node = ONode, - owner = UserUS, - to = To, - from = From, - meta = Meta}] -> - {ok, #{sid => SID, - dialog => Handle, - request => Request, - state => State, - direction => Dir, - node => ONode, - from => From, - to => To, - meta => Meta}}; - _ -> - {error, not_found} - end. - -spec remove_session(call_id()) -> ok. remove_session(CallID) -> mnesia:dirty_delete(jingle_sip_session, CallID). diff --git a/src/jingle_sip/mod_jingle_sip_session.erl b/src/jingle_sip/mod_jingle_sip_session.erl new file mode 100644 index 00000000000..2647baf3788 --- /dev/null +++ b/src/jingle_sip/mod_jingle_sip_session.erl @@ -0,0 +1,195 @@ +%% @doc Handles operations with #jingle_sip_session{} record +-module(mod_jingle_sip_session). + +-export([set_incoming_request/5]). +-export([set_incoming_handle/2]). +-export([set_outgoing_request/4]). +-export([set_outgoing_handle/4]). +-export([set_outgoing_accepted/1]). +-export([set_incoming_accepted/1]). +-export([get_incoming_request/2]). +-export([get_outgoing_handle/2]). +-export([get_session_info/2]). +-export([remove_session/1]). 
+ +-include("mongoose.hrl"). +-include("mod_jingle_sip_session.hrl"). + +-type call_id() :: binary(). +-type incoming_request() :: {node(), binary()}. +-type outgoing_handle() :: binary(). +-type dialog_handle() :: nksip:handle(). +-type request_id() :: binary(). +-type session() :: #jingle_sip_session{ + sid :: call_id(), + dialog :: dialog_handle() | undefined, + state :: accepted | ringing | undefined, + direction :: in | out, + request :: request_id() | undefined, + node :: node(), + owner :: jid:simple_bare_jid(), + from :: jid:simple_bare_jid(), + to :: jid:simple_bare_jid(), + now :: integer(), + meta :: #{init_stanza => exml:element()} + }. +-type update_fun() :: fun((session()) -> session() | {error, term()}). +-export_type([call_id/0, session/0, update_fun/0]). + +-spec make_simple_bare_jid(jid:jid()) -> jid:simple_bare_jid(). +make_simple_bare_jid(Jid) -> + {_, _} = jid:to_lus(Jid). + +-spec set_incoming_request(CallID :: call_id(), ReqID :: request_id(), + From :: jid:jid(), To :: jid:jid(), exml:element()) -> + ok | {error, any()}. +set_incoming_request(CallID, ReqID, From, To, JingleEl) -> + Owner = make_simple_bare_jid(To), + Meta = #{init_stanza => JingleEl}, + Session = #jingle_sip_session{sid = CallID, + request = ReqID, + dialog = undefined, + state = undefined, + direction = in, + node = node(), + from = make_simple_bare_jid(From), + to = Owner, + owner = Owner, + now = os:system_time(microsecond), + meta = Meta}, + mod_jingle_sip_backend:write_new_session(CallID, Session). + +-spec set_outgoing_request(CallID :: call_id(), ReqID :: request_id(), + From :: jid:jid(), To :: jid:jid()) -> + ok | {error, any()}. 
+set_outgoing_request(CallID, ReqID, From, To) -> + Owner = make_simple_bare_jid(From), + Session = #jingle_sip_session{sid = CallID, + request = ReqID, + dialog = undefined, + state = undefined, + direction = out, + node = node(), + from = Owner, + to = make_simple_bare_jid(To), + owner = Owner, + now = os:system_time(microsecond), + meta = #{}}, + mod_jingle_sip_backend:write_new_session(CallID, Session). + +-spec set_incoming_handle(CallID :: call_id(), DialogHandle :: dialog_handle()) -> + ok | {error, any()}. +set_incoming_handle(CallID, DialogHandle) -> + F = fun(Session) -> do_set_incoming_handle(DialogHandle, Session) end, + mod_jingle_sip_backend:update_session(CallID, F). + +do_set_incoming_handle(DialogHandle, Session = #jingle_sip_session{dialog = undefined, direction = in}) -> + Session#jingle_sip_session{dialog = DialogHandle, + node = node()}; +do_set_incoming_handle(_, _) -> + {error, incoming_handle_exists}. + +-spec set_outgoing_handle(CallID :: call_id(), DialogHandle :: dialog_handle(), + From :: jid:jid(), To :: jid:jid()) -> + ok | {error, any()}. +set_outgoing_handle(CallID, DialogHandle, From, To) -> + F = fun(Session) -> do_set_outgoing_handle(DialogHandle, Session) end, + case mod_jingle_sip_backend:update_session(CallID, F) of + {error, not_found} -> + Owner = make_simple_bare_jid(From), + Session = #jingle_sip_session{sid = CallID, + dialog = DialogHandle, + node = node(), + direction = out, + state = ringing, + from = Owner, + to = make_simple_bare_jid(To), + owner = Owner, + now = os:system_time(microsecond), + meta = #{}}, + mod_jingle_sip_backend:write_new_session(CallID, Session); + Res -> + Res + end. + +do_set_outgoing_handle(DialogHandle, Session = #jingle_sip_session{dialog = undefined, direction = out}) -> + Session#jingle_sip_session{dialog = DialogHandle, + node = node()}; +do_set_outgoing_handle(_, _) -> + {error, outgoing_handle_exists}. + +-spec set_incoming_accepted(CallID :: call_id()) -> + ok | {error, any()}. 
+set_incoming_accepted(CallID) -> + mod_jingle_sip_backend:update_session(CallID, fun do_set_incoming_accepted/1). + +do_set_incoming_accepted(Session = #jingle_sip_session{direction = in, meta = Meta}) -> + MetaWithoutInitStanza = maps:without([init_stanza], Meta), + Session#jingle_sip_session{state = accepted, + meta = MetaWithoutInitStanza}; +do_set_incoming_accepted(_) -> + {error, not_found}. + +-spec set_outgoing_accepted(CallID :: call_id()) -> + ok | {error, any()}. +set_outgoing_accepted(CallID) -> + mod_jingle_sip_backend:update_session(CallID, fun do_set_outgoing_accepted/1). + +do_set_outgoing_accepted(Session = #jingle_sip_session{direction = out}) -> + Session#jingle_sip_session{state = accepted}; +do_set_outgoing_accepted(_) -> + {error, not_found}. + +-spec get_incoming_request(call_id(), jid:jid()) -> {ok, undefined | incoming_request()} | + {error, not_found}. +get_incoming_request(CallID, User) -> + UserUS = make_simple_bare_jid(User), + case mod_jingle_sip_backend:read_session(CallID) of + [#jingle_sip_session{request = ReqID, node = Node, owner = UserUS}] -> + {ok, {Node, ReqID}}; + _ -> + {error, not_found} + end. + +-spec get_outgoing_handle(call_id(), jid:jid()) -> {ok, undefined | outgoing_handle()} | + {error, not_found}. +get_outgoing_handle(SID, User) -> + UserUS = make_simple_bare_jid(User), + case mod_jingle_sip_backend:read_session(SID) of + [#jingle_sip_session{dialog = Handle, owner = UserUS}] -> + {ok, Handle}; + _ -> + {error, not_found} + end. + +-spec get_session_info(binary(), jid:jid()) -> + {ok, map()} | {error, any()}. 
+get_session_info(SID, User) -> + UserUS = make_simple_bare_jid(User), + case mod_jingle_sip_backend:read_session(SID) of + [#jingle_sip_session{sid = SID, + dialog = Handle, + request = Request, + state = State, + direction = Dir, + node = ONode, + owner = UserUS, + to = To, + from = From, + meta = Meta}] -> + {ok, #{sid => SID, + dialog => Handle, + request => Request, + state => State, + direction => Dir, + node => ONode, + from => From, + to => To, + meta => Meta}}; + _ -> + {error, not_found} + end. + +-spec remove_session(call_id()) -> ok. +remove_session(CallID) -> + mod_jingle_sip_backend:remove_session(CallID). From 86a25f1cf111509079fd5b07464e053ab94de7cf Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 4 Aug 2023 16:26:41 +0200 Subject: [PATCH 145/161] Use latest CETS --- rebar.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rebar.lock b/rebar.lock index 41a8aeb49be..09c8d9a6595 100644 --- a/rebar.lock +++ b/rebar.lock @@ -8,7 +8,7 @@ {<<"certifi">>,{pkg,<<"certifi">>,<<"2.9.0">>},1}, {<<"cets">>, {git,"https://github.com/esl/cets.git", - {ref,"b4fefc99af7e5a729e47e1edf6453426941da766"}}, + {ref,"6094d51605315d4551107e41078538b5fec6585d"}}, 0}, {<<"cowboy">>,{pkg,<<"cowboy">>,<<"2.9.0">>},0}, {<<"cowboy_swagger">>,{pkg,<<"cowboy_swagger">>,<<"2.5.1">>},0}, From feec9670c3040e2022585f3ffa316333d38bd0c1 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 4 Aug 2023 17:24:55 +0200 Subject: [PATCH 146/161] Fix compilation warnings Call mongoose_metrics:init() in ejabberd_app, --- src/ejabberd_app.erl | 1 + src/global_distrib/mod_global_distrib_mapping.erl | 4 ++-- src/jingle_sip/mod_jingle_sip_backend.erl | 2 -- src/system_metrics/mongoose_system_metrics_sender.erl | 2 -- 4 files changed, 3 insertions(+), 6 deletions(-) diff --git a/src/ejabberd_app.erl b/src/ejabberd_app.erl index 0906435b70b..87f1eeee12c 100644 --- a/src/ejabberd_app.erl +++ b/src/ejabberd_app.erl @@ -63,6 +63,7 @@ do_start() -> ejabberd_commands:init(), 
mongoose_graphql_commands:start(), mongoose_config:start(), + mongoose_metrics:init(), db_init(), mongoose_router:start(), mongoose_logs:set_global_loglevel(mongoose_config:get_opt(loglevel)), diff --git a/src/global_distrib/mod_global_distrib_mapping.erl b/src/global_distrib/mod_global_distrib_mapping.erl index a380da345eb..3cbb7700956 100644 --- a/src/global_distrib/mod_global_distrib_mapping.erl +++ b/src/global_distrib/mod_global_distrib_mapping.erl @@ -133,7 +133,7 @@ hosts() -> %%-------------------------------------------------------------------- -spec start(mongooseim:host_type(), gen_mod:module_opts()) -> any(). -start(HostType, Opts = #{cache := CacheOpts}) -> +start(_HostType, Opts = #{cache := CacheOpts}) -> mod_global_distrib_mapping_backend:start(Opts#{backend => redis}), mongoose_metrics:ensure_metric(global, ?GLOBAL_DISTRIB_MAPPING_FETCH_TIME, histogram), @@ -153,7 +153,7 @@ start(HostType, Opts = #{cache := CacheOpts}) -> {max_size, MaxJids}]). -spec stop(mongooseim:host_type()) -> any(). -stop(HostType) -> +stop(_HostType) -> ets_cache:delete(?JID_TAB), ets_cache:delete(?DOMAIN_TAB), mod_global_distrib_mapping_backend:stop(). diff --git a/src/jingle_sip/mod_jingle_sip_backend.erl b/src/jingle_sip/mod_jingle_sip_backend.erl index a4c61354798..c1d32cea498 100644 --- a/src/jingle_sip/mod_jingle_sip_backend.erl +++ b/src/jingle_sip/mod_jingle_sip_backend.erl @@ -1,8 +1,6 @@ %% @doc Backend module for mod_jingle_sip -module(mod_jingle_sip_backend). --export([init/2]). --export([remove_session/1]). -export([init/2]). -export([read_session/1]). -export([write_new_session/2]). diff --git a/src/system_metrics/mongoose_system_metrics_sender.erl b/src/system_metrics/mongoose_system_metrics_sender.erl index 41f758310b2..694a4433bd3 100644 --- a/src/system_metrics/mongoose_system_metrics_sender.erl +++ b/src/system_metrics/mongoose_system_metrics_sender.erl @@ -4,8 +4,6 @@ -export([send/3]). --type google_analytics_report() :: string(). 
--type url() :: string(). -type report_struct() :: mongoose_system_metrics_collector:report_struct(). -spec send(service_mongoose_system_metrics:client_id(), From df35aee8d6afbb81f444f57d0e36f102380e3860 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 4 Aug 2023 18:29:50 +0200 Subject: [PATCH 147/161] Better error reporting in graphql_metric_SUITE:values_are_integers --- big_tests/tests/graphql_metric_SUITE.erl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/big_tests/tests/graphql_metric_SUITE.erl b/big_tests/tests/graphql_metric_SUITE.erl index b50ff43fa8d..f4ae3eb559a 100644 --- a/big_tests/tests/graphql_metric_SUITE.erl +++ b/big_tests/tests/graphql_metric_SUITE.erl @@ -445,7 +445,12 @@ check_spiral_dict(Dict) -> ?assert(is_integer(One)). values_are_integers(Map, Keys) -> - lists:foreach(fun(Key) -> ?assert(is_integer(maps:get(Key, Map))) end, Keys). + case lists:all(fun(Key) -> is_integer(maps:get(Key, Map)) end, Keys) of + true -> + ok; + false -> + ct:fail({values_are_integers, Keys, Map}) + end. metric_host_type() -> binary:replace(domain_helper:host_type(), <<" ">>, <<"_">>, [global]). From 083ea91b96d7a2815cdc03b40e2e0cceb054155c Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Fri, 4 Aug 2023 22:51:10 +0200 Subject: [PATCH 148/161] Create metrics after starting the backend in ejabberd_sm --- src/ejabberd_sm.erl | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/ejabberd_sm.erl b/src/ejabberd_sm.erl index a3b4ea8a6aa..38d6732586f 100644 --- a/src/ejabberd_sm.erl +++ b/src/ejabberd_sm.erl @@ -138,10 +138,6 @@ start() -> -spec start_link() -> 'ignore' | {'error', _} | {'ok', pid()}. 
start_link() -> - mongoose_metrics:ensure_metric(global, ?UNIQUE_COUNT_CACHE, gauge), - mongoose_metrics:create_probe_metric(global, totalSessionCount, mongoose_metrics_probe_total_sessions), - mongoose_metrics:create_probe_metric(global, uniqueSessionCount, mongoose_metrics_probe_unique_sessions), - mongoose_metrics:create_probe_metric(global, nodeSessionCount, mongoose_metrics_probe_node_sessions), gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). @@ -446,8 +442,16 @@ init([]) -> lists:foreach(fun(HostType) -> gen_hook:add_handlers(hooks(HostType)) end, ?ALL_HOST_TYPES), ejabberd_commands:register_commands(commands()), + %% Create metrics after backend has started, otherwise probe could have null value + create_metrics(), {ok, #state{}}. +create_metrics() -> + mongoose_metrics:ensure_metric(global, ?UNIQUE_COUNT_CACHE, gauge), + mongoose_metrics:create_probe_metric(global, totalSessionCount, mongoose_metrics_probe_total_sessions), + mongoose_metrics:create_probe_metric(global, uniqueSessionCount, mongoose_metrics_probe_unique_sessions), + mongoose_metrics:create_probe_metric(global, nodeSessionCount, mongoose_metrics_probe_node_sessions). + -spec hooks(binary()) -> [gen_hook:hook_tuple()]. 
hooks(HostType) -> [ From 50a1d015da0a02a481cef452c933e05b46f00710 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Sat, 5 Aug 2023 00:30:19 +0200 Subject: [PATCH 149/161] Disable mnesia in pgsql_cets preset --- big_tests/test.config | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/big_tests/test.config b/big_tests/test.config index ba22723b189..0b5a3864326 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -248,8 +248,7 @@ {jingle_sip_backend, cets}, {auth_method, "rdbms"}, {internal_databases, "[internal_databases.cets] - cluster_name = \"{{cluster_name}}\" -[internal_databases.mnesia]"}, %% We still using mnesia for modules that are not converted to use CETS + cluster_name = \"{{cluster_name}}\""}, {outgoing_pools, "[outgoing_pools.redis.global_distrib] scope = \"global\" workers = 10 From 58169fc6a83abc76d51df1707dabb341d9fb2977 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Sat, 5 Aug 2023 12:26:48 +0200 Subject: [PATCH 150/161] Start mod_offline with correct backend in tests --- big_tests/tests/inbox_SUITE.erl | 4 +++- big_tests/tests/push_integration_SUITE.erl | 6 ++++-- big_tests/tests/sm_SUITE.erl | 3 ++- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/big_tests/tests/inbox_SUITE.erl b/big_tests/tests/inbox_SUITE.erl index 72fdbada0e3..a3ba49cc830 100644 --- a/big_tests/tests/inbox_SUITE.erl +++ b/big_tests/tests/inbox_SUITE.erl @@ -170,10 +170,12 @@ init_per_group(GroupName, Config) when GroupName =:= regular; GroupName =:= asyn Config1 = dynamic_modules:save_modules(HostType, Config), Config2 = dynamic_modules:save_modules(SecHostType, Config1), InboxOptions = inbox_helper:inbox_opts(GroupName), + Backend = mongoose_helper:mnesia_or_rdbms_backend(), + ModOffline = config_parser_helper:mod_config(mod_offline, #{backend => Backend}), ok = dynamic_modules:ensure_modules(HostType, inbox_helper:inbox_modules(GroupName) ++ inbox_helper:muclight_modules() - ++ [{mod_offline, 
config_parser_helper:default_mod_config(mod_offline)}]), + ++ [{mod_offline, ModOffline}]), ok = dynamic_modules:ensure_modules(SecHostType, [{mod_inbox, InboxOptions#{aff_changes := false}}]), [{inbox_opts, InboxOptions} | Config2]; diff --git a/big_tests/tests/push_integration_SUITE.erl b/big_tests/tests/push_integration_SUITE.erl index b91108b0d02..5fc2551f96e 100644 --- a/big_tests/tests/push_integration_SUITE.erl +++ b/big_tests/tests/push_integration_SUITE.erl @@ -1015,8 +1015,9 @@ mongoose_push_api_for_group(_) -> <<"v3">>. required_modules_for_group(pm_notifications_with_inbox, API, PubSubHost) -> + Backend = mongoose_helper:mnesia_or_rdbms_backend(), [{mod_inbox, inbox_opts()}, - {mod_offline, config_parser_helper:mod_config(mod_offline, #{})} | + {mod_offline, config_parser_helper:mod_config(mod_offline, #{backend => Backend})} | required_modules(API, PubSubHost)]; required_modules_for_group(groupchat_notifications_with_inbox, API, PubSubHost)-> [{mod_inbox, inbox_opts()}, {mod_muc_light, muc_light_opts()} @@ -1024,10 +1025,11 @@ required_modules_for_group(groupchat_notifications_with_inbox, API, PubSubHost)- required_modules_for_group(muclight_msg_notifications, API, PubSubHost) -> [{mod_muc_light, muc_light_opts()} | required_modules(API, PubSubHost)]; required_modules_for_group(integration_with_sm_and_offline_storage, API, PubSubHost) -> + Backend = mongoose_helper:mnesia_or_rdbms_backend(), [{mod_muc_light, muc_light_opts()}, {mod_stream_management, config_parser_helper:mod_config(mod_stream_management, #{ack_freq => never, resume_timeout => 1})}, - {mod_offline, config_parser_helper:mod_config(mod_offline, #{})} | + {mod_offline, config_parser_helper:mod_config(mod_offline, #{backend => Backend})} | required_modules(API, PubSubHost)]; required_modules_for_group(enhanced_integration_with_sm, API, PubSubHost) -> [{mod_stream_management, config_parser_helper:mod_config(mod_stream_management, #{ack_freq => never})} | diff --git 
a/big_tests/tests/sm_SUITE.erl b/big_tests/tests/sm_SUITE.erl index cb6125d9a0a..e19d4cdea16 100644 --- a/big_tests/tests/sm_SUITE.erl +++ b/big_tests/tests/sm_SUITE.erl @@ -194,8 +194,9 @@ required_modules(Config, Scope, Name) -> stopped -> stopped; ExtraOpts -> maps:merge(common_sm_opts(Config), ExtraOpts) end, + Backend = mongoose_helper:mnesia_or_rdbms_backend(), [{mod_stream_management, config_parser_helper:mod_config(mod_stream_management, SMConfig)}, - {mod_offline, config_parser_helper:mod_config(mod_offline, #{})}]. + {mod_offline, config_parser_helper:mod_config(mod_offline, #{backend => Backend})}]. required_sm_opts(group, parallel) -> #{ack_freq => never}; From 2a9bd44f2297c591c3769aae82071af152587154 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Sat, 5 Aug 2023 14:48:18 +0200 Subject: [PATCH 151/161] Restore loglevel after graphql_server_SUITE:set_and_get_loglevel_test testcase --- big_tests/src/mim_loglevel.erl | 13 +++++++++++++ big_tests/tests/graphql_server_SUITE.erl | 7 +++++++ 2 files changed, 20 insertions(+) diff --git a/big_tests/src/mim_loglevel.erl b/big_tests/src/mim_loglevel.erl index 39a951dd573..3cd01ad3390 100644 --- a/big_tests/src/mim_loglevel.erl +++ b/big_tests/src/mim_loglevel.erl @@ -1,6 +1,8 @@ -module(mim_loglevel). -export([enable_logging/2]). -export([disable_logging/2]). +-export([save_log_level/1]). +-export([restore_log_level/1]). enable_logging(Hosts, Levels) -> [set_custom(Host, Module, Level) || Host <- Hosts, {Module, Level} <- Levels]. @@ -15,3 +17,14 @@ set_custom(Host, Module, Level) -> clear_custom(Host, Module, _Level) -> Node = ct:get_config({hosts, Host, node}), mongoose_helper:successful_rpc(#{node => Node}, mongoose_logs, clear_module_loglevel, [Module]). + +save_log_level(Config) -> + Node = distributed_helper:mim(), + OldLogLevel = distributed_helper:rpc(Node, mongoose_logs, get_global_loglevel, []), + [{old_log_level, OldLogLevel} | Config]. 
+ +restore_log_level(Config) -> + Node = distributed_helper:mim(), + {old_log_level, OldLogLevel} = lists:keyfind(old_log_level, 1, Config), + ok = distributed_helper:rpc(Node, mongoose_logs, set_global_loglevel, [OldLogLevel]), + ok. diff --git a/big_tests/tests/graphql_server_SUITE.erl b/big_tests/tests/graphql_server_SUITE.erl index 0aca24eb405..1d8ed66f843 100644 --- a/big_tests/tests/graphql_server_SUITE.erl +++ b/big_tests/tests/graphql_server_SUITE.erl @@ -96,9 +96,16 @@ end_per_group(Group, _Config) when Group =:= admin_http; end_per_group(_, _Config) -> escalus_fresh:clean(). +init_per_testcase(set_and_get_loglevel_test = CaseName, Config) -> + Config1 = mim_loglevel:save_log_level(Config), + escalus:init_per_testcase(CaseName, Config1); init_per_testcase(CaseName, Config) -> escalus:init_per_testcase(CaseName, Config). + +end_per_testcase(set_and_get_loglevel_test = CaseName, Config) -> + mim_loglevel:restore_log_level(Config), + escalus:end_per_testcase(CaseName, Config); end_per_testcase(CaseName, Config) when CaseName == join_successful orelse CaseName == join_successful_http orelse CaseName == join_twice -> From 128a7c791d4cf8b74a11530d7c645e81cba72fc6 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Sat, 5 Aug 2023 14:50:52 +0200 Subject: [PATCH 152/161] Ignore graphql_server_SUITE:clustering_http_tests for CETS --- big_tests/tests/graphql_server_SUITE.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/big_tests/tests/graphql_server_SUITE.erl b/big_tests/tests/graphql_server_SUITE.erl index 1d8ed66f843..288c2f55c1d 100644 --- a/big_tests/tests/graphql_server_SUITE.erl +++ b/big_tests/tests/graphql_server_SUITE.erl @@ -79,7 +79,7 @@ init_per_group(admin_http, Config) -> graphql_helper:init_admin_handler(Config); init_per_group(admin_cli, Config) -> graphql_helper:init_admin_cli(Config); -init_per_group(clustering_tests, Config) -> +init_per_group(Group, Config) when Group =:= clustering_tests; Group =:= clustering_http_tests -> 
case is_sm_distributed() of true -> Config; From 38cbc4a3b6e385ad1d89c34e70804677e7f15ba8 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Sat, 5 Aug 2023 15:44:00 +0200 Subject: [PATCH 153/161] Set correct backend for mod_pubsub in push_pubsub_SUITE --- big_tests/tests/push_pubsub_SUITE.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/big_tests/tests/push_pubsub_SUITE.erl b/big_tests/tests/push_pubsub_SUITE.erl index 17b728f62a3..80c9f9a1ca6 100644 --- a/big_tests/tests/push_pubsub_SUITE.erl +++ b/big_tests/tests/push_pubsub_SUITE.erl @@ -425,7 +425,8 @@ required_modules(APIVersion) -> [{mod_pubsub, config_parser_helper:mod_config(mod_pubsub, #{ plugins => [<<"dag">>, <<"push">>], nodetree => nodetree_dag, - host => subhost_pattern(?PUBSUB_SUB_DOMAIN ++ ".@HOST@") + host => subhost_pattern(?PUBSUB_SUB_DOMAIN ++ ".@HOST@"), + backend => mongoose_helper:mnesia_or_rdbms_backend() })}, {mod_push_service_mongoosepush, config_parser_helper:mod_config(mod_push_service_mongoosepush, #{pool_name => mongoose_push_http, From 5d9890532ed14bc2190240829d08e52fc4c13806 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Sat, 5 Aug 2023 15:44:26 +0200 Subject: [PATCH 154/161] Set correct backend for mod_muclight in rest_client_SUITE --- big_tests/tests/rest_client_SUITE.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/big_tests/tests/rest_client_SUITE.erl b/big_tests/tests/rest_client_SUITE.erl index c1bd5f06736..aeed6ea3f58 100644 --- a/big_tests/tests/rest_client_SUITE.erl +++ b/big_tests/tests/rest_client_SUITE.erl @@ -209,7 +209,8 @@ muc_light_opts(suite) -> #{}. common_muc_light_opts() -> - #{rooms_in_rosters => true}. + #{rooms_in_rosters => true, + backend => mongoose_helper:mnesia_or_rdbms_backend()}. 
%% -------------------------------------------------------------------- %% Test cases From 3677b7756350bc32963553628bdfb4a0bbde0c77 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Sat, 5 Aug 2023 15:47:21 +0200 Subject: [PATCH 155/161] Set correct backend in vcard_simple_SUITE --- big_tests/tests/vcard_simple_SUITE.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/big_tests/tests/vcard_simple_SUITE.erl b/big_tests/tests/vcard_simple_SUITE.erl index ecf1f9c3894..4ea297e73a5 100644 --- a/big_tests/tests/vcard_simple_SUITE.erl +++ b/big_tests/tests/vcard_simple_SUITE.erl @@ -465,7 +465,8 @@ prepare_vcard_module(Config) -> %% Keep the old config, so we can undo our changes, once finished testing Config1 = dynamic_modules:save_modules(host_types(), Config), %% Get a list of options, we can use as a prototype to start new modules - VCardOpts = config_parser_helper:default_mod_config(mod_vcard), + Backend = mongoose_helper:mnesia_or_rdbms_backend(), + VCardOpts = config_parser_helper:mod_config(mod_vcard, #{backend => Backend}), [{mod_vcard_opts, VCardOpts} | Config1]. 
restore_vcard_module(Config) -> From a5534f7384bdb1a551c8d578607ac512c9e18b77 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Sat, 5 Aug 2023 16:28:31 +0200 Subject: [PATCH 156/161] Configure mod_offline with correct backend in xep_0352_csi_SUITE --- big_tests/tests/xep_0352_csi_SUITE.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/big_tests/tests/xep_0352_csi_SUITE.erl b/big_tests/tests/xep_0352_csi_SUITE.erl index a991a1be68b..6ae55a7b5ce 100644 --- a/big_tests/tests/xep_0352_csi_SUITE.erl +++ b/big_tests/tests/xep_0352_csi_SUITE.erl @@ -37,8 +37,9 @@ suite() -> init_per_suite(Config) -> NewConfig = dynamic_modules:save_modules(host_type(), Config), + Backend = mongoose_helper:mnesia_or_rdbms_backend(), dynamic_modules:ensure_modules( - host_type(), [{mod_offline, default_mod_config(mod_offline)}, + host_type(), [{mod_offline, mod_config(mod_offline, #{backend => Backend})}, {mod_csi, mod_config(mod_csi, #{buffer_max => ?CSI_BUFFER_MAX})}]), [{escalus_user_db, {module, escalus_ejabberd}} | escalus:init_per_suite(NewConfig)]. 
From 32621f37900ed93a932f109bbb66a15e3859f6ad Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Mon, 7 Aug 2023 10:56:24 +0200 Subject: [PATCH 157/161] Update CETS dependency --- rebar.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rebar.lock b/rebar.lock index 09c8d9a6595..a2e9b3955c2 100644 --- a/rebar.lock +++ b/rebar.lock @@ -8,7 +8,7 @@ {<<"certifi">>,{pkg,<<"certifi">>,<<"2.9.0">>},1}, {<<"cets">>, {git,"https://github.com/esl/cets.git", - {ref,"6094d51605315d4551107e41078538b5fec6585d"}}, + {ref,"9965e3b35f3776dff5879effaab538a0ab94592d"}}, 0}, {<<"cowboy">>,{pkg,<<"cowboy">>,<<"2.9.0">>},0}, {<<"cowboy_swagger">>,{pkg,<<"cowboy_swagger">>,<<"2.5.1">>},0}, From fa0c589e9d4a16b001de4bb2a54416377b5a1cbe Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 8 Aug 2023 16:04:49 +0200 Subject: [PATCH 158/161] Clean mongoose_node_num.erl --- src/mongoose_node_num.erl | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/src/mongoose_node_num.erl b/src/mongoose_node_num.erl index 68c53c7d96a..ab2f67ccdbd 100644 --- a/src/mongoose_node_num.erl +++ b/src/mongoose_node_num.erl @@ -1,23 +1,16 @@ %% Returns a numeric id from 0 to 255 for the current node. %% Used to generate MAM IDs. -module(mongoose_node_num). --export([set_node_num/1]). --export([node_num/0]). - --include("mongoose.hrl"). --include("jlib.hrl"). --include("mongoose_config_spec.hrl"). --include("mongoose_logger.hrl"). +-export([node_num/0, set_node_num/1]). -type node_num() :: 0..255. --define(KEY, ?MODULE). -export_type([node_num/0]). %% @doc Return an integer node ID. -spec node_num() -> node_num(). node_num() -> - %% We just return 0 if service is not running. - persistent_term:get(?KEY, 0). + %% We just return 0 if set_node_num/1 is not called. + persistent_term:get(?MODULE, 0). -spec set_node_num(node_num()) -> ignore | updated | same. 
set_node_num(Num) -> @@ -25,6 +18,6 @@ set_node_num(Num) -> true -> same; false -> - persistent_term:put(?KEY, Num), + persistent_term:put(?MODULE, Num), updated end. From 0c08e1dee759efae6c548023606cb19c51c44152 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 8 Aug 2023 16:11:30 +0200 Subject: [PATCH 159/161] Reorder init steps in ejabberd_app again --- src/ejabberd_app.erl | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/ejabberd_app.erl b/src/ejabberd_app.erl index 87f1eeee12c..c1ede1526be 100644 --- a/src/ejabberd_app.erl +++ b/src/ejabberd_app.erl @@ -56,15 +56,13 @@ do_start() -> mongoose_fips:notify(), write_pid_file(), update_status_file(starting), - application:start(cache_tab), - + mongoose_config:start(), + mongoose_metrics:init(), + db_init(), mongoose_graphql:init(), translate:start(), ejabberd_commands:init(), mongoose_graphql_commands:start(), - mongoose_config:start(), - mongoose_metrics:init(), - db_init(), mongoose_router:start(), mongoose_logs:set_global_loglevel(mongoose_config:get_opt(loglevel)), mongoose_deprecations:start(), From dd37252fa0028c5832b6476fb5ab1159a2fbc823 Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 8 Aug 2023 16:17:56 +0200 Subject: [PATCH 160/161] Move CETS supervisor specs into mongoose_cets_discovery --- src/ejabberd_sup.erl | 33 +------------------------------ src/mongoose_cets_discovery.erl | 35 +++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 32 deletions(-) create mode 100644 src/mongoose_cets_discovery.erl diff --git a/src/ejabberd_sup.erl b/src/ejabberd_sup.erl index b0018bbeb2c..16c9d3aa7b8 100644 --- a/src/ejabberd_sup.erl +++ b/src/ejabberd_sup.erl @@ -168,7 +168,7 @@ init([]) -> Cleaner, SMBackendSupervisor, OutgoingPoolsSupervisor - ] ++ cets_specs() ++ [ + ] ++ mongoose_cets_discovery:supervisor_specs() ++ [ Router, S2S, Local, @@ -204,34 +204,3 @@ stop_child(Proc) -> supervisor:terminate_child(ejabberd_sup, Proc), 
supervisor:delete_child(ejabberd_sup, Proc), ok. - -cets_specs() -> - cets_specs(mongoose_config:get_opt([internal_databases, cets], disabled)). - -cets_specs(disabled) -> - []; -cets_specs(#{backend := DiscoBackend, cluster_name := ClusterName} = Opts) -> - DiscoFile = - case {DiscoBackend, Opts} of - {file, #{node_list_file := NodeFile}} -> - NodeFile; - {file, _} -> - ?LOG_CRITICAL(#{what => node_list_file_option_is_required, - text => <<"Specify internal_databases.cets.node_list_file option">>}), - error(node_list_file_option_is_required); - _ -> - undefined - end, - DiscoOpts = #{ - backend_module => disco_backend_to_module(DiscoBackend), - cluster_name => atom_to_binary(ClusterName), - node_name_to_insert => atom_to_binary(node(), latin1), - name => mongoose_cets_discovery, disco_file => DiscoFile}, - CetsDisco = - {cets_discovery, - {cets_discovery, start_link, [DiscoOpts]}, - permanent, infinity, supervisor, [cets_discovery]}, - [CetsDisco]. - -disco_backend_to_module(rdbms) -> mongoose_cets_discovery_rdbms; -disco_backend_to_module(file) -> cets_discovery_file. diff --git a/src/mongoose_cets_discovery.erl b/src/mongoose_cets_discovery.erl new file mode 100644 index 00000000000..e843846275c --- /dev/null +++ b/src/mongoose_cets_discovery.erl @@ -0,0 +1,35 @@ +-module(mongoose_cets_discovery). +-export([supervisor_specs/0]). + +-include("mongoose_logger.hrl"). + +supervisor_specs() -> + supervisor_specs(mongoose_config:get_opt([internal_databases, cets], disabled)). 
+ +supervisor_specs(disabled) -> + []; +supervisor_specs(#{backend := DiscoBackend, cluster_name := ClusterName} = Opts) -> + DiscoFile = + case {DiscoBackend, Opts} of + {file, #{node_list_file := NodeFile}} -> + NodeFile; + {file, _} -> + ?LOG_CRITICAL(#{what => node_list_file_option_is_required, + text => <<"Specify internal_databases.cets.node_list_file option">>}), + error(node_list_file_option_is_required); + _ -> + undefined + end, + DiscoOpts = #{ + backend_module => disco_backend_to_module(DiscoBackend), + cluster_name => atom_to_binary(ClusterName), + node_name_to_insert => atom_to_binary(node(), latin1), + name => mongoose_cets_discovery, disco_file => DiscoFile}, + CetsDisco = + {cets_discovery, + {cets_discovery, start_link, [DiscoOpts]}, + permanent, infinity, supervisor, [cets_discovery]}, + [CetsDisco]. + +disco_backend_to_module(rdbms) -> mongoose_cets_discovery_rdbms; +disco_backend_to_module(file) -> cets_discovery_file. From c5faa49326f12ab22e3444f857b9b8a126194a2e Mon Sep 17 00:00:00 2001 From: Mikhail Uvarov Date: Tue, 8 Aug 2023 16:26:33 +0200 Subject: [PATCH 161/161] Move cets into included_applications in app config --- src/mongooseim.app.src | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/mongooseim.app.src b/src/mongooseim.app.src index d8351364167..630771af6bc 100644 --- a/src/mongooseim.app.src +++ b/src/mongooseim.app.src @@ -4,8 +4,7 @@ [{description, "MongooseIM"}, {vsn, {cmd, "tools/generate_vsn.sh"}}, {modules, []}, - {registered, [ - ]}, + {registered, []}, {applications, [ asn1, backoff, @@ -50,10 +49,9 @@ cowboy_swagger, tomerl, flatlog, - segmented_cache, - cets + segmented_cache ]}, - {included_applications, [mnesia]}, + {included_applications, [mnesia, cets]}, {env, []}, {mod, {ejabberd_app, []}}]}.