diff --git a/.circleci/template.yml b/.circleci/template.yml index 7c5729cfb6f..47d957e98cb 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -578,7 +578,7 @@ jobs: preset: type: enum enum: [internal_mnesia, mysql_redis, odbc_mssql_mnesia, ldap_mnesia, - elasticsearch_and_cassandra_mnesia, pgsql_mnesia] + elasticsearch_and_cassandra_mnesia, pgsql_mnesia, pgsql_cets] description: Preset to run default: internal_mnesia db: @@ -865,6 +865,15 @@ workflows: requires: - otp_25_docker filters: *all_tags + - big_tests_in_docker: + name: pgsql_cets_25 + executor: otp_25_pgsql_redis + context: mongooseim-org + preset: pgsql_cets + db: "mnesia postgres cets" + requires: + - otp_25_docker + filters: *all_tags - big_tests_in_docker: name: mysql_redis_25 executor: otp_25_mysql_redis diff --git a/big_tests/default.spec b/big_tests/default.spec index 0aea7131756..282bf73c8de 100644 --- a/big_tests/default.spec +++ b/big_tests/default.spec @@ -45,6 +45,7 @@ {suites, "tests", graphql_gdpr_SUITE}. {suites, "tests", graphql_token_SUITE}. {suites, "tests", graphql_mnesia_SUITE}. +{suites, "tests", graphql_cets_SUITE}. {suites, "tests", graphql_vcard_SUITE}. {suites, "tests", graphql_http_upload_SUITE}. {suites, "tests", graphql_server_SUITE}. @@ -115,6 +116,8 @@ {suites, "tests", dynamic_domains_SUITE}. {suites, "tests", local_iq_SUITE}. {suites, "tests", tcp_listener_SUITE}. +{suites, "tests", cets_disco_SUITE}. +{suites, "tests", start_node_id_SUITE}. {config, ["test.config"]}. {logdir, "ct_report"}. diff --git a/big_tests/dynamic_domains.spec b/big_tests/dynamic_domains.spec index aed34e1a66c..d0c13ddfc97 100644 --- a/big_tests/dynamic_domains.spec +++ b/big_tests/dynamic_domains.spec @@ -63,6 +63,7 @@ {suites, "tests", graphql_gdpr_SUITE}. {suites, "tests", graphql_token_SUITE}. {suites, "tests", graphql_mnesia_SUITE}. +{suites, "tests", graphql_cets_SUITE}. {suites, "tests", graphql_http_upload_SUITE}. {suites, "tests", graphql_server_SUITE}. 
{suites, "tests", graphql_metric_SUITE}. @@ -157,6 +158,8 @@ {suites, "tests", domain_removal_SUITE}. {suites, "tests", local_iq_SUITE}. {suites, "tests", tcp_listener_SUITE}. +{suites, "tests", cets_disco_SUITE}. +{suites, "tests", start_node_id_SUITE}. {config, ["dynamic_domains.config", "test.config"]}. diff --git a/big_tests/src/mim_loglevel.erl b/big_tests/src/mim_loglevel.erl index 39a951dd573..3cd01ad3390 100644 --- a/big_tests/src/mim_loglevel.erl +++ b/big_tests/src/mim_loglevel.erl @@ -1,6 +1,8 @@ -module(mim_loglevel). -export([enable_logging/2]). -export([disable_logging/2]). +-export([save_log_level/1]). +-export([restore_log_level/1]). enable_logging(Hosts, Levels) -> [set_custom(Host, Module, Level) || Host <- Hosts, {Module, Level} <- Levels]. @@ -15,3 +17,14 @@ set_custom(Host, Module, Level) -> clear_custom(Host, Module, _Level) -> Node = ct:get_config({hosts, Host, node}), mongoose_helper:successful_rpc(#{node => Node}, mongoose_logs, clear_module_loglevel, [Module]). + +save_log_level(Config) -> + Node = distributed_helper:mim(), + OldLogLevel = distributed_helper:rpc(Node, mongoose_logs, get_global_loglevel, []), + [{old_log_level, OldLogLevel} | Config]. + +restore_log_level(Config) -> + Node = distributed_helper:mim(), + {old_log_level, OldLogLevel} = lists:keyfind(old_log_level, 1, Config), + ok = distributed_helper:rpc(Node, mongoose_logs, set_global_loglevel, [OldLogLevel]), + ok. 
diff --git a/big_tests/test.config b/big_tests/test.config index c6839c7e92c..0b5a3864326 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -237,6 +237,36 @@ {outgoing_pools, "[outgoing_pools.redis.global_distrib] scope = \"global\" workers = 10"}]}, + {pgsql_cets, + [{dbs, [redis, pgsql]}, + {sm_backend, "\"cets\""}, + {bosh_backend, "\"cets\""}, + {component_backend, "\"cets\""}, + {s2s_backend, "\"cets\""}, + {stream_management_backend, cets}, + {muc_online_backend, cets}, + {jingle_sip_backend, cets}, + {auth_method, "rdbms"}, + {internal_databases, "[internal_databases.cets] + cluster_name = \"{{cluster_name}}\""}, + {outgoing_pools, "[outgoing_pools.redis.global_distrib] + scope = \"global\" + workers = 10 +[outgoing_pools.rdbms.default] + scope = \"global\" + workers = 5 + connection.driver = \"pgsql\" + connection.host = \"localhost\" + connection.database = \"ejabberd\" + connection.username = \"ejabberd\" + connection.password = \"mongooseim_secret\" + connection.tls.required = true + connection.tls.cacertfile = \"priv/ssl/cacert.pem\" + connection.tls.server_name_indication.enabled = false"}, + {service_domain_db, ""}, + {mod_vcard, " backend = \"rdbms\" + host = \"vjud.@HOST@\"\n"}, + {mod_roster, " backend = \"rdbms\"\n"}]}, {pgsql_mnesia, [{dbs, [redis, pgsql]}, {auth_method, "rdbms"}, diff --git a/big_tests/tests/cets_disco_SUITE.erl b/big_tests/tests/cets_disco_SUITE.erl new file mode 100644 index 00000000000..35e99c7ed52 --- /dev/null +++ b/big_tests/tests/cets_disco_SUITE.erl @@ -0,0 +1,76 @@ +-module(cets_disco_SUITE). +-compile([export_all, nowarn_export_all]). + +-import(distributed_helper, [mim/0, rpc/4]). +-include_lib("common_test/include/ct.hrl"). + +%%-------------------------------------------------------------------- +%% Suite configuration +%%-------------------------------------------------------------------- + +all() -> + [{group, file}, {group, rdbms}]. 
+ +groups() -> + [{file, [], file_cases()}, + {rdbms, [], rdbms_cases()}]. + +file_cases() -> + [file_backend]. + +rdbms_cases() -> + [rdbms_backend]. + +suite() -> + escalus:suite(). + +%%-------------------------------------------------------------------- +%% Init & teardown +%%-------------------------------------------------------------------- +init_per_suite(Config) -> + escalus:init_per_suite(Config). + +end_per_suite(Config) -> + escalus:end_per_suite(Config). + +init_per_group(rdbms, Config) -> + case not ct_helper:is_ct_running() + orelse mongoose_helper:is_rdbms_enabled(domain_helper:host_type()) of + false -> {skip, rdbms_or_ct_not_running}; + true -> Config + end; +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. + +init_per_testcase(CaseName, Config) -> + escalus:init_per_testcase(CaseName, Config). + +end_per_testcase(CaseName, Config) -> + escalus:end_per_testcase(CaseName, Config). + +%%-------------------------------------------------------------------- +%% Test cases +%%-------------------------------------------------------------------- + +file_backend(Config) -> + Path = filename:join(?config(mim_data_dir, Config), "nodes.txt"), + Opts = #{disco_file => Path}, + State = rpc(mim(), cets_discovery_file, init, [Opts]), + {{ok, Nodes}, _} = rpc(mim(), cets_discovery_file, get_nodes, [State]), + ['node1@localhost', 'node2@otherhost'] = lists:sort(Nodes). 
+ +rdbms_backend(_Config) -> + Opts1 = #{cluster_name => <<"big_test">>, node_name_to_insert => <<"test1">>}, + Opts2 = #{cluster_name => <<"big_test">>, node_name_to_insert => <<"test2">>}, + State1 = rpc(mim(), mongoose_cets_discovery_rdbms, init, [Opts1]), + rpc(mim(), mongoose_cets_discovery_rdbms, get_nodes, [State1]), + State2 = rpc(mim(), mongoose_cets_discovery_rdbms, init, [Opts2]), + {{ok, Nodes}, State2_2} = rpc(mim(), mongoose_cets_discovery_rdbms, get_nodes, [State2]), + %% "test2" node can see "test1" + true = lists:member(test1, Nodes), + {{ok, _}, State2_3} = rpc(mim(), mongoose_cets_discovery_rdbms, get_nodes, [State2_2]), + %% Check that we follow the right code branch + #{last_query_info := #{already_registered := true}} = State2_3. diff --git a/big_tests/tests/cets_disco_SUITE_data/nodes.txt b/big_tests/tests/cets_disco_SUITE_data/nodes.txt new file mode 100644 index 00000000000..8e85e526bd8 --- /dev/null +++ b/big_tests/tests/cets_disco_SUITE_data/nodes.txt @@ -0,0 +1,2 @@ +node1@localhost +node2@otherhost diff --git a/big_tests/tests/component_helper.erl b/big_tests/tests/component_helper.erl index 05d0fa04a7e..2a4f450c678 100644 --- a/big_tests/tests/component_helper.erl +++ b/big_tests/tests/component_helper.erl @@ -45,7 +45,7 @@ disconnect_component(Component, Addr) -> disconnect_components(Components, Addr) -> %% TODO replace 'kill' with 'stop' when server supports stream closing [escalus_connection:kill(Component) || Component <- Components], - mongoose_helper:wait_until(fun() -> rpc(ejabberd_router, lookup_component, [Addr]) =:= [] end, true, + mongoose_helper:wait_until(fun() -> rpc(mongoose_component, lookup_component, [Addr]) =:= [] end, true, #{name => rpc}). 
rpc(M, F, A) -> diff --git a/big_tests/tests/ct_helper.erl b/big_tests/tests/ct_helper.erl index dd5a0f01973..a7bd86833e9 100644 --- a/big_tests/tests/ct_helper.erl +++ b/big_tests/tests/ct_helper.erl @@ -4,7 +4,8 @@ repeat_all_until_all_ok/2, repeat_all_until_any_fail/1, repeat_all_until_any_fail/2, - groups_to_all/1]). + groups_to_all/1, + get_preset_var/3]). -type group_name() :: atom(). @@ -114,3 +115,12 @@ is_ct_started() -> groups_to_all(Groups) -> [{group, Name} || {Name, _Opts, _Cases} <- Groups]. + +get_preset_var(Config, Opt, Def) -> + case proplists:get_value(preset, Config, undefined) of + Preset when is_list(Preset) -> + PresetAtom = list_to_existing_atom(Preset), + ct:get_config({presets, toml, PresetAtom, Opt}, Def); + _ -> + Def + end. diff --git a/big_tests/tests/domain_removal_SUITE.erl b/big_tests/tests/domain_removal_SUITE.erl index 579376bd6ac..953b3280f4a 100644 --- a/big_tests/tests/domain_removal_SUITE.erl +++ b/big_tests/tests/domain_removal_SUITE.erl @@ -162,7 +162,7 @@ is_internal_or_rdbms() -> %%%=================================================================== init_per_testcase(muc_removal, Config) -> - muc_helper:load_muc(), + muc_helper:load_muc(Config), mongoose_helper:ensure_muc_clean(), escalus:init_per_testcase(muc_removal, Config); init_per_testcase(roster_removal, ConfigIn) -> diff --git a/big_tests/tests/gdpr_SUITE.erl b/big_tests/tests/gdpr_SUITE.erl index 9174ed44eec..e19c4a0dff3 100644 --- a/big_tests/tests/gdpr_SUITE.erl +++ b/big_tests/tests/gdpr_SUITE.erl @@ -180,7 +180,7 @@ all_mam_testcases() -> init_per_suite(Config) -> #{node := MimNode} = distributed_helper:mim(), Config1 = [{{ejabberd_cwd, MimNode}, get_mim_cwd()} | dynamic_modules:save_modules(host_type(), Config)], - muc_helper:load_muc(), + muc_helper:load_muc(Config), escalus:init_per_suite(Config1). 
end_per_suite(Config) -> @@ -243,7 +243,7 @@ init_per_testcase(CN, Config) when Config1; init_per_testcase(CN, Config) when CN =:= retrieve_inbox_muc; CN =:= remove_inbox_muc -> - muc_helper:load_muc(), + muc_helper:load_muc(Config), Config0 = init_inbox(CN, Config, muc), Config0; diff --git a/big_tests/tests/graphql_cets_SUITE.erl b/big_tests/tests/graphql_cets_SUITE.erl new file mode 100644 index 00000000000..63302ed29cc --- /dev/null +++ b/big_tests/tests/graphql_cets_SUITE.erl @@ -0,0 +1,77 @@ +-module(graphql_cets_SUITE). +-include_lib("eunit/include/eunit.hrl"). + +-compile([export_all, nowarn_export_all]). + +-import(distributed_helper, [mim/0]). +-import(domain_helper, [host_type/1]). +-import(mongooseimctl_helper, [rpc_call/3]). +-import(graphql_helper, [execute_command/4, get_unauthorized/1, get_ok_value/2]). + +all() -> + [{group, admin_cets_cli}, + {group, admin_cets_http}, + {group, domain_admin_cets}]. + +groups() -> + [{admin_cets_http, [sequence], admin_cets_tests()}, + {admin_cets_cli, [sequence], admin_cets_tests()}, + {domain_admin_cets, [], domain_admin_tests()}]. + +admin_cets_tests() -> + [has_sm_table_in_info]. + +domain_admin_tests() -> + [domain_admin_get_info_test]. + +init_per_suite(Config) -> + Config1 = escalus:init_per_suite(Config), + ejabberd_node_utils:init(mim(), Config1). + +end_per_suite(Config) -> + escalus:end_per_suite(Config). + +init_per_group(admin_cets_http, Config) -> + graphql_helper:init_admin_handler(Config); +init_per_group(admin_cets_cli, Config) -> + graphql_helper:init_admin_cli(Config); +init_per_group(domain_admin_cets, Config) -> + graphql_helper:init_domain_admin_handler(Config). + +end_per_group(_, _Config) -> + graphql_helper:clean(), + escalus_fresh:clean(). + +init_per_testcase(has_sm_table_in_info, Config) -> + case rpc_call(ejabberd_sm, sm_backend, []) of + ejabberd_sm_cets -> + Config; + _ -> + {skip, "SM backend is not CETS"} + end; +init_per_testcase(_, Config) -> + Config. 
+ +% Admin tests + +has_sm_table_in_info(Config) -> + Res = get_info(Config), + Tables = get_ok_value([data, cets, systemInfo], Res), + [T] = [T || T = #{<<"tableName">> := <<"cets_session">>} <- Tables], + #{<<"memory">> := Mem, <<"nodes">> := Nodes, <<"size">> := Size} = T, + true = is_integer(Mem), + true = is_integer(Size), + #{node := Node1} = mim(), + true = lists:member(atom_to_binary(Node1), Nodes). + +% Domain admin tests + +domain_admin_get_info_test(Config) -> + get_unauthorized(get_info(Config)). + +%-------------------------------------------------------------------------------------------------- +% Helpers +%-------------------------------------------------------------------------------------------------- + +get_info(Config) -> + execute_command(<<"cets">>, <<"systemInfo">>, #{}, Config). diff --git a/big_tests/tests/graphql_metric_SUITE.erl b/big_tests/tests/graphql_metric_SUITE.erl index 3326dfaac7e..f4ae3eb559a 100644 --- a/big_tests/tests/graphql_metric_SUITE.erl +++ b/big_tests/tests/graphql_metric_SUITE.erl @@ -287,13 +287,13 @@ get_cluster_metrics_by_nonexistent_name(Config) -> Result = get_cluster_metrics_as_dicts_by_name([<<"nonexistent">>], Config), ParsedResult = get_ok_value([data, metric, getClusterMetricsAsDicts], Result), [#{<<"node">> := _, <<"result">> := []}, - #{<<"node">> := _, <<"result">> := []}] = ParsedResult. + #{<<"node">> := _, <<"result">> := []}|_] = ParsedResult. %% two or three nodes. get_cluster_metrics_with_nonexistent_key(Config) -> Result = get_cluster_metrics_as_dicts_with_keys([<<"nonexistent">>], Config), ParsedResult = get_ok_value([data, metric, getClusterMetricsAsDicts], Result), [#{<<"node">> := _, <<"result">> := [_|_]}, - #{<<"node">> := _, <<"result">> := [_|_]}] = ParsedResult. + #{<<"node">> := _, <<"result">> := [_|_]}|_] = ParsedResult. get_cluster_metrics_empty_args(Config) -> Node = atom_to_binary(maps:get(node, distributed_helper:mim2())), @@ -445,7 +445,12 @@ check_spiral_dict(Dict) -> ?assert(is_integer(One)). 
values_are_integers(Map, Keys) -> - lists:foreach(fun(Key) -> ?assert(is_integer(maps:get(Key, Map))) end, Keys). + case lists:all(fun(Key) -> is_integer(maps:get(Key, Map)) end, Keys) of + true -> + ok; + false -> + ct:fail({values_are_integers, Keys, Map}) + end. metric_host_type() -> binary:replace(domain_helper:host_type(), <<" ">>, <<"_">>, [global]). diff --git a/big_tests/tests/graphql_muc_SUITE.erl b/big_tests/tests/graphql_muc_SUITE.erl index dd968c3d6b4..b57d31dc99b 100644 --- a/big_tests/tests/graphql_muc_SUITE.erl +++ b/big_tests/tests/graphql_muc_SUITE.erl @@ -237,20 +237,20 @@ init_per_group(admin_cli, Config) -> graphql_helper:init_admin_cli(Config); init_per_group(domain_admin_muc, Config) -> maybe_enable_mam(), - ensure_muc_started(), + ensure_muc_started(Config), graphql_helper:init_domain_admin_handler(Config); init_per_group(user, Config) -> graphql_helper:init_user(Config); init_per_group(Group, Config) when Group =:= admin_muc_configured; Group =:= user_muc_configured -> disable_mam(), - ensure_muc_started(), + ensure_muc_started(Config), Config; init_per_group(Group, Config) when Group =:= admin_muc_and_mam_configured; Group =:= user_muc_and_mam_configured -> case maybe_enable_mam() of true -> - ensure_muc_started(), + ensure_muc_started(Config), ensure_muc_light_started(Config); false -> {skip, "No MAM backend available"} @@ -277,10 +277,10 @@ maybe_enable_mam() -> true end. -ensure_muc_started() -> +ensure_muc_started(Config) -> SecondaryHostType = domain_helper:secondary_host_type(), - muc_helper:load_muc(), - muc_helper:load_muc(SecondaryHostType), + muc_helper:load_muc(Config), + muc_helper:load_muc(Config, SecondaryHostType), mongoose_helper:ensure_muc_clean(). 
ensure_muc_stopped() -> diff --git a/big_tests/tests/graphql_muc_light_SUITE.erl b/big_tests/tests/graphql_muc_light_SUITE.erl index c9deff69444..872633016a5 100644 --- a/big_tests/tests/graphql_muc_light_SUITE.erl +++ b/big_tests/tests/graphql_muc_light_SUITE.erl @@ -237,7 +237,7 @@ init_per_group(Group, Config) when Group =:= user_muc_light_with_mam; Group =:= domain_admin_muc_light_with_mam -> case maybe_enable_mam() of true -> - ensure_muc_started(), + ensure_muc_started(Config), ensure_muc_light_started(Config); false -> {skip, "No MAM backend available"} @@ -281,8 +281,8 @@ ensure_muc_light_stopped(Config) -> dynamic_modules:ensure_modules(SecondaryHostType, [{mod_muc_light, stopped}]), [{muc_light_host, <<"NON_EXISTING">>} | Config]. -ensure_muc_started() -> - muc_helper:load_muc(), +ensure_muc_started(Config) -> + muc_helper:load_muc(Config), mongoose_helper:ensure_muc_clean(). ensure_muc_stopped() -> diff --git a/big_tests/tests/graphql_server_SUITE.erl b/big_tests/tests/graphql_server_SUITE.erl index 0aca24eb405..288c2f55c1d 100644 --- a/big_tests/tests/graphql_server_SUITE.erl +++ b/big_tests/tests/graphql_server_SUITE.erl @@ -79,7 +79,7 @@ init_per_group(admin_http, Config) -> graphql_helper:init_admin_handler(Config); init_per_group(admin_cli, Config) -> graphql_helper:init_admin_cli(Config); -init_per_group(clustering_tests, Config) -> +init_per_group(Group, Config) when Group =:= clustering_tests; Group =:= clustering_http_tests -> case is_sm_distributed() of true -> Config; @@ -96,9 +96,16 @@ end_per_group(Group, _Config) when Group =:= admin_http; end_per_group(_, _Config) -> escalus_fresh:clean(). +init_per_testcase(set_and_get_loglevel_test = CaseName, Config) -> + Config1 = mim_loglevel:save_log_level(Config), + escalus:init_per_testcase(CaseName, Config1); init_per_testcase(CaseName, Config) -> escalus:init_per_testcase(CaseName, Config). 
+ +end_per_testcase(set_and_get_loglevel_test = CaseName, Config) -> + mim_loglevel:restore_log_level(Config), + escalus:end_per_testcase(CaseName, Config); end_per_testcase(CaseName, Config) when CaseName == join_successful orelse CaseName == join_successful_http orelse CaseName == join_twice -> diff --git a/big_tests/tests/inbox_SUITE.erl b/big_tests/tests/inbox_SUITE.erl index 665831b3518..a3ba49cc830 100644 --- a/big_tests/tests/inbox_SUITE.erl +++ b/big_tests/tests/inbox_SUITE.erl @@ -170,10 +170,12 @@ init_per_group(GroupName, Config) when GroupName =:= regular; GroupName =:= asyn Config1 = dynamic_modules:save_modules(HostType, Config), Config2 = dynamic_modules:save_modules(SecHostType, Config1), InboxOptions = inbox_helper:inbox_opts(GroupName), + Backend = mongoose_helper:mnesia_or_rdbms_backend(), + ModOffline = config_parser_helper:mod_config(mod_offline, #{backend => Backend}), ok = dynamic_modules:ensure_modules(HostType, inbox_helper:inbox_modules(GroupName) ++ inbox_helper:muclight_modules() - ++ [{mod_offline, config_parser_helper:default_mod_config(mod_offline)}]), + ++ [{mod_offline, ModOffline}]), ok = dynamic_modules:ensure_modules(SecHostType, [{mod_inbox, InboxOptions#{aff_changes := false}}]), [{inbox_opts, InboxOptions} | Config2]; @@ -186,7 +188,7 @@ init_per_group(muclight_config, Config) -> Config1 = inbox_helper:reload_inbox_option(Config, groupchat, [muclight]), escalus:create_users(Config1, escalus:get_users([alice, alice_bis, bob, kate, mike])); init_per_group(muc, Config) -> - muc_helper:load_muc(), + muc_helper:load_muc(Config), inbox_helper:reload_inbox_option(Config, groupchat, [muc]); init_per_group(limit_result, Config) -> OptKey = [{modules, domain_helper:host_type()}, mod_inbox, max_result_limit], diff --git a/big_tests/tests/jingle_SUITE.erl b/big_tests/tests/jingle_SUITE.erl index c25d61f402d..bfc1698bfc7 100644 --- a/big_tests/tests/jingle_SUITE.erl +++ b/big_tests/tests/jingle_SUITE.erl @@ -59,7 +59,7 @@ 
init_per_suite(Config) -> case rpc(mim(), application, get_application, [nksip]) of {ok, nksip} -> distributed_helper:add_node_to_cluster(mim2(), Config), - start_nksip_in_mim_nodes(), + start_nksip_in_mim_nodes(Config), application:ensure_all_started(esip), spawn(fun() -> ets:new(jingle_sip_translator, [public, named_table]), ets:new(jingle_sip_translator_bindings, [public, named_table]), @@ -71,9 +71,9 @@ init_per_suite(Config) -> {skip, build_was_not_configured_with_jingle_sip} end. -start_nksip_in_mim_nodes() -> - Pid1 = start_nskip_in_parallel(mim(), #{}), - Pid2 = start_nskip_in_parallel(mim2(), #{listen_port => 12346}), +start_nksip_in_mim_nodes(Config) -> + Pid1 = start_nskip_in_parallel(Config, mim(), #{}), + Pid2 = start_nskip_in_parallel(Config, mim2(), #{listen_port => 12346}), wait_for_process_to_stop(Pid1), wait_for_process_to_stop(Pid2). @@ -85,10 +85,11 @@ wait_for_process_to_stop(Pid) -> ct:fail(wait_for_process_to_stop_timeout) end. -start_nskip_in_parallel(NodeSpec, ExtraOpts) -> +start_nskip_in_parallel(Config, NodeSpec, ExtraOpts) -> Domain = domain(), Opts = #{proxy_host => <<"localhost">>, - proxy_port => 12345}, + proxy_port => 12345, + backend => ct_helper:get_preset_var(Config, jingle_sip_backend, mnesia)}, OptsWithExtra = maps:merge(Opts, ExtraOpts), AllOpts = config_parser_helper:mod_config(mod_jingle_sip, OptsWithExtra), RPCSpec = NodeSpec#{timeout => timer:seconds(60)}, diff --git a/big_tests/tests/mam_SUITE.erl b/big_tests/tests/mam_SUITE.erl index 30a5bb29f34..a6f244c40f6 100644 --- a/big_tests/tests/mam_SUITE.erl +++ b/big_tests/tests/mam_SUITE.erl @@ -540,7 +540,7 @@ suite() -> require_rpc_nodes([mim]) ++ escalus:suite(). 
init_per_suite(Config) -> - muc_helper:load_muc(), + muc_helper:load_muc(Config), mam_helper:prepare_for_suite( increase_limits( delete_users([{escalus_user_db, {module, escalus_ejabberd}} diff --git a/big_tests/tests/mod_event_pusher_rabbit_SUITE.erl b/big_tests/tests/mod_event_pusher_rabbit_SUITE.erl index 00f8fc5b94d..a4945aeea80 100644 --- a/big_tests/tests/mod_event_pusher_rabbit_SUITE.erl +++ b/big_tests/tests/mod_event_pusher_rabbit_SUITE.erl @@ -110,7 +110,7 @@ init_per_suite(Config) -> true -> start_rabbit_wpool(domain()), {ok, _} = application:ensure_all_started(amqp_client), - muc_helper:load_muc(), + muc_helper:load_muc(Config), escalus:init_per_suite(Config); false -> {skip, "RabbitMQ server is not available on default port."} diff --git a/big_tests/tests/mod_event_pusher_sns_SUITE.erl b/big_tests/tests/mod_event_pusher_sns_SUITE.erl index e3ab7be7d4e..217eeddd94e 100644 --- a/big_tests/tests/mod_event_pusher_sns_SUITE.erl +++ b/big_tests/tests/mod_event_pusher_sns_SUITE.erl @@ -60,7 +60,7 @@ init_per_suite(Config) -> %% For mocking with unnamed functions mongoose_helper:inject_module(?MODULE), - muc_helper:load_muc(), + muc_helper:load_muc(Config), escalus:init_per_suite(Config); {error, _} -> {skip, "erlcloud dependency is not enabled"} diff --git a/big_tests/tests/mod_global_distrib_SUITE.erl b/big_tests/tests/mod_global_distrib_SUITE.erl index 4682446fcdc..a193399d5c4 100644 --- a/big_tests/tests/mod_global_distrib_SUITE.erl +++ b/big_tests/tests/mod_global_distrib_SUITE.erl @@ -254,7 +254,7 @@ init_per_testcase(CaseName, Config) {_, EuropeHost, _} = lists:keyfind(europe_node1, 1, get_hosts()), trigger_rebalance(asia_node, EuropeHost), %% Load muc on mim node - muc_helper:load_muc(), + muc_helper:load_muc(Config), RegNode = ct:get_config({hosts, reg, node}), %% Wait for muc.localhost to become visible from reg node wait_for_domain(RegNode, muc_helper:muc_host()), diff --git a/big_tests/tests/mongoose_helper.erl b/big_tests/tests/mongoose_helper.erl 
index 31fbb9a2f83..93aa4248d2c 100644 --- a/big_tests/tests/mongoose_helper.erl +++ b/big_tests/tests/mongoose_helper.erl @@ -242,7 +242,8 @@ stop_online_rooms() -> false -> ct:fail({ejabberd_mod_muc_sup_not_found, Supervisor, HostType}) end, rpc(mim(), erlang, exit, [SupervisorPid, kill]), - rpc(mim(), mnesia, clear_table, [muc_online_room]), + %% That's a pretty dirty way + rpc(mim(), mongoose_muc_online_backend, clear_table, [HostType]), ok. forget_persistent_rooms() -> @@ -499,13 +500,8 @@ restart_listener(Spec, Listener) -> rpc(Spec, mongoose_listener, start_listener, [Listener]). should_minio_be_running(Config) -> - case proplists:get_value(preset, Config, undefined) of - undefined -> false; - Preset -> - PresetAtom = list_to_existing_atom(Preset), - DBs = ct:get_config({presets, toml, PresetAtom, dbs}, []), - lists:member(minio, DBs) - end. + DBs = ct_helper:get_preset_var(Config, dbs, []), + lists:member(minio, DBs). %% It is useful to debug dynamic IQ handler registration print_debug_info_for_module(Module) -> diff --git a/big_tests/tests/muc_SUITE.erl b/big_tests/tests/muc_SUITE.erl index 1439f4afb20..738b8dc7cc3 100644 --- a/big_tests/tests/muc_SUITE.erl +++ b/big_tests/tests/muc_SUITE.erl @@ -29,8 +29,6 @@ -import(muc_helper, [muc_host/0, - load_muc/0, - unload_muc/0, start_room/5, generate_rpc_jid/1, destroy_room/1, @@ -318,7 +316,7 @@ rsm_cases() -> rsm_cases_with_offline() -> [pagination_all_with_offline]. suite() -> - s2s_helper:suite(escalus:suite()). + distributed_helper:require_rpc_nodes([mim, fed]) ++ escalus:suite(). %%-------------------------------------------------------------------- %% Init & teardown @@ -331,14 +329,14 @@ init_per_suite(Config) -> Config2 = escalus:init_per_suite(Config), Config3 = dynamic_modules:save_modules(host_type(), Config2), dynamic_modules:restart(host_type(), mod_disco, default_mod_config(mod_disco)), - load_muc(), + muc_helper:load_muc(Config), mongoose_helper:ensure_muc_clean(), Config3. 
end_per_suite(Config) -> escalus_fresh:clean(), mongoose_helper:ensure_muc_clean(), - unload_muc(), + muc_helper:unload_muc(), dynamic_modules:restore_modules(Config), escalus:end_per_suite(Config). diff --git a/big_tests/tests/muc_helper.erl b/big_tests/tests/muc_helper.erl index bca13681e85..39ba0950ee5 100644 --- a/big_tests/tests/muc_helper.erl +++ b/big_tests/tests/muc_helper.erl @@ -52,14 +52,15 @@ foreach_recipient(Users, VerifyFun) -> VerifyFun(escalus:wait_for_stanza(Recipient)) end, Users). -load_muc() -> - load_muc(domain_helper:host_type()). +load_muc(Config) -> + load_muc(Config, domain_helper:host_type()). -load_muc(HostType) -> +load_muc(Config, HostType) -> Backend = muc_backend(), MucHostPattern = ct:get_config({hosts, mim, muc_service_pattern}), ct:log("Starting MUC for ~p", [HostType]), Opts = #{host => subhost_pattern(MucHostPattern), backend => Backend, + online_backend => muc_online_backend(Config), hibernate_timeout => 2000, hibernated_room_check_interval => 1000, hibernated_room_timeout => 2000, @@ -86,6 +87,9 @@ muc_host_pattern() -> muc_backend() -> mongoose_helper:mnesia_or_rdbms_backend(). +muc_online_backend(Config) when is_list(Config) -> + ct_helper:get_preset_var(Config, muc_online_backend, mnesia). + start_room(Config, User, Room, Nick, Opts) -> From = generate_rpc_jid(User), create_instant_room(Room, From, Nick, Opts), @@ -176,15 +180,26 @@ destroy_room(Config) -> destroy_room(muc_host(), ?config(room, Config)). destroy_room(Host, Room) when is_binary(Host), is_binary(Room) -> + HostType = domain_helper:host_type(), Room1 = jid:nodeprep(Room), - case rpc(mim(), ets, lookup, [muc_online_room, {Room1, Host}]) of - [{_,_,Pid}|_] -> + case rpc(mim(), mongoose_muc_online_backend, find_room_pid, [HostType, Host, Room1]) of + {ok, Pid} -> %% @TODO related to gen_fsm_compat: after migration to gen_statem %% should be replaced to - gen_statem:call(Pid, destroy). Pid ! 
{'$gen_all_state_event', destroy}, + wait_for_process_down(Pid), ok; - _ -> + {error, not_found} -> + ok + end. + +wait_for_process_down(Pid) -> + Ref = monitor(process, Pid), + receive + {'DOWN', Ref, _Type, Pid, _Info} -> ok + after 5000 -> + ct:fail(wait_for_process_down_failed) end. stanza_muc_enter_room(Room, Nick) -> diff --git a/big_tests/tests/muc_http_api_SUITE.erl b/big_tests/tests/muc_http_api_SUITE.erl index 2b49c7c51ac..5fd2eecec98 100644 --- a/big_tests/tests/muc_http_api_SUITE.erl +++ b/big_tests/tests/muc_http_api_SUITE.erl @@ -66,7 +66,7 @@ failure_response() -> %%-------------------------------------------------------------------- init_per_suite(Config) -> - muc_helper:load_muc(), + muc_helper:load_muc(Config), escalus:init_per_suite(Config). end_per_suite(Config) -> diff --git a/big_tests/tests/push_integration_SUITE.erl b/big_tests/tests/push_integration_SUITE.erl index b91108b0d02..5fc2551f96e 100644 --- a/big_tests/tests/push_integration_SUITE.erl +++ b/big_tests/tests/push_integration_SUITE.erl @@ -1015,8 +1015,9 @@ mongoose_push_api_for_group(_) -> <<"v3">>. 
required_modules_for_group(pm_notifications_with_inbox, API, PubSubHost) -> + Backend = mongoose_helper:mnesia_or_rdbms_backend(), [{mod_inbox, inbox_opts()}, - {mod_offline, config_parser_helper:mod_config(mod_offline, #{})} | + {mod_offline, config_parser_helper:mod_config(mod_offline, #{backend => Backend})} | required_modules(API, PubSubHost)]; required_modules_for_group(groupchat_notifications_with_inbox, API, PubSubHost)-> [{mod_inbox, inbox_opts()}, {mod_muc_light, muc_light_opts()} @@ -1024,10 +1025,11 @@ required_modules_for_group(groupchat_notifications_with_inbox, API, PubSubHost)- required_modules_for_group(muclight_msg_notifications, API, PubSubHost) -> [{mod_muc_light, muc_light_opts()} | required_modules(API, PubSubHost)]; required_modules_for_group(integration_with_sm_and_offline_storage, API, PubSubHost) -> + Backend = mongoose_helper:mnesia_or_rdbms_backend(), [{mod_muc_light, muc_light_opts()}, {mod_stream_management, config_parser_helper:mod_config(mod_stream_management, #{ack_freq => never, resume_timeout => 1})}, - {mod_offline, config_parser_helper:mod_config(mod_offline, #{})} | + {mod_offline, config_parser_helper:mod_config(mod_offline, #{backend => Backend})} | required_modules(API, PubSubHost)]; required_modules_for_group(enhanced_integration_with_sm, API, PubSubHost) -> [{mod_stream_management, config_parser_helper:mod_config(mod_stream_management, #{ack_freq => never})} | diff --git a/big_tests/tests/push_pubsub_SUITE.erl b/big_tests/tests/push_pubsub_SUITE.erl index 17b728f62a3..80c9f9a1ca6 100644 --- a/big_tests/tests/push_pubsub_SUITE.erl +++ b/big_tests/tests/push_pubsub_SUITE.erl @@ -425,7 +425,8 @@ required_modules(APIVersion) -> [{mod_pubsub, config_parser_helper:mod_config(mod_pubsub, #{ plugins => [<<"dag">>, <<"push">>], nodetree => nodetree_dag, - host => subhost_pattern(?PUBSUB_SUB_DOMAIN ++ ".@HOST@") + host => subhost_pattern(?PUBSUB_SUB_DOMAIN ++ ".@HOST@"), + backend => mongoose_helper:mnesia_or_rdbms_backend() })}, 
{mod_push_service_mongoosepush, config_parser_helper:mod_config(mod_push_service_mongoosepush, #{pool_name => mongoose_push_http, diff --git a/big_tests/tests/rest_client_SUITE.erl b/big_tests/tests/rest_client_SUITE.erl index c1bd5f06736..aeed6ea3f58 100644 --- a/big_tests/tests/rest_client_SUITE.erl +++ b/big_tests/tests/rest_client_SUITE.erl @@ -209,7 +209,8 @@ muc_light_opts(suite) -> #{}. common_muc_light_opts() -> - #{rooms_in_rosters => true}. + #{rooms_in_rosters => true, + backend => mongoose_helper:mnesia_or_rdbms_backend()}. %% -------------------------------------------------------------------- %% Test cases diff --git a/big_tests/tests/rest_helper.erl b/big_tests/tests/rest_helper.erl index 338d8241e78..1ecdc1ebc80 100644 --- a/big_tests/tests/rest_helper.erl +++ b/big_tests/tests/rest_helper.erl @@ -36,7 +36,7 @@ assert_inlist(Pattern, L) -> Fl = lists:filter(fun(X) -> case X of Pattern -> true; _ -> false end end, L), case Fl of [] -> - ct:fail(io_lib:format("Fail: ~p not in [~p...]", [Pattern, H])); + ct:fail("Fail: ~p not in [~p...]", [Pattern, H]); _ -> Fl end. @@ -49,13 +49,13 @@ assert_notinlist(Pattern, L) -> [] -> ok; _ -> - ct:fail(io_lib:format("Fail: ~p in ~p", [Pattern, L])) + ct:fail("Fail: ~p in ~p", [Pattern, L]) end. assert_inmaplist([], Map, L, [H|_]) -> case L of [] -> - ct:fail(io_lib:format("Fail: ~p not in [~p...]", [Map, H])); + ct:fail("Fail: ~p not in [~p...]", [Map, H]); _ -> L end; @@ -70,7 +70,7 @@ assert_notinmaplist([], Map, L, [H|_]) -> [] -> ok; _ -> - ct:fail(io_lib:format("Fail: ~p in [~p...]", [Map, H])) + ct:fail("Fail: ~p in [~p...]", [Map, H]) end; assert_notinmaplist([K|Keys], Map, L, Orig) -> V = maps:get(K, Map), diff --git a/big_tests/tests/s2s_SUITE.erl b/big_tests/tests/s2s_SUITE.erl index 08911f0a01c..a7dd3798779 100644 --- a/big_tests/tests/s2s_SUITE.erl +++ b/big_tests/tests/s2s_SUITE.erl @@ -10,9 +10,11 @@ -include_lib("escalus/include/escalus.hrl"). -include_lib("exml/include/exml.hrl"). 
-include_lib("exml/include/exml_stream.hrl"). +-include_lib("eunit/include/eunit.hrl"). %% Module aliases -define(dh, distributed_helper). +-import(distributed_helper, [mim/0, rpc_spec/1, rpc/4]). %%%=================================================================== %%% Suite configuration @@ -34,8 +36,9 @@ all() -> {group, node1_tls_optional_node2_tls_false}, {group, node1_tls_false_node2_tls_required}, - {group, node1_tls_required_node2_tls_false} + {group, node1_tls_required_node2_tls_false}, + {group, dialback} ]. groups() -> @@ -57,16 +60,15 @@ groups() -> {node1_tls_optional_node2_tls_false, [], essentials()}, {node1_tls_false_node2_tls_required, [], negative()}, - {node1_tls_required_node2_tls_false, [], negative()}]. + {node1_tls_required_node2_tls_false, [], negative()}, + {dialback, [], [dialback_key_is_synchronized_on_different_nodes]}]. essentials() -> [simple_message]. -metrics() -> - [s2s_metrics_testing]. - all_tests() -> - [connections_info, nonexistent_user, unknown_domain, malformed_jid | essentials()]. + [connections_info, nonexistent_user, unknown_domain, malformed_jid, + dialback_with_wrong_key | essentials()]. negative() -> [timeout_waiting_for_message]. @@ -84,7 +86,7 @@ connection_cases() -> auth_with_valid_cert_fails_for_other_mechanism_than_external]. suite() -> - s2s_helper:suite(escalus:suite()). + distributed_helper:require_rpc_nodes([mim, mim2, fed]) ++ escalus:suite(). users() -> [alice2, alice, bob]. @@ -103,6 +105,9 @@ end_per_suite(Config) -> escalus:delete_users(Config, escalus:get_users(users())), escalus:end_per_suite(Config). +init_per_group(dialback, Config) -> + %% Tell mnesia that mim and mim2 nodes are clustered + distributed_helper:add_node_to_cluster(distributed_helper:mim2(), Config); init_per_group(GroupName, Config) -> s2s_helper:configure_s2s(GroupName, Config). @@ -159,14 +164,12 @@ connections_info(Config) -> [_ | _] = get_s2s_connections(?dh:mim(), FedDomain, out), ok. 
-get_s2s_connections(RPCSpec, Domain, Type)-> - AllS2SConnections = ?dh:rpc(RPCSpec, ejabberd_s2s, get_info_s2s_connections, [Type]), - % ct:pal("Node = ~p, ConnectionType = ~p~nAllS2SConnections(~p): ~p", - % [maps:get(node, RPCSpec), Type, length(AllS2SConnections), AllS2SConnections]), +get_s2s_connections(RPCSpec, Domain, Type) -> + AllS2SConnections = ?dh:rpc(RPCSpec, mongoose_s2s_info, get_connections, [Type]), DomainS2SConnections = [Connection || Connection <- AllS2SConnections, - Type =/= in orelse [Domain] =:= proplists:get_value(domains, Connection), - Type =/= out orelse Domain =:= proplists:get_value(server, Connection)], + Type =/= in orelse [Domain] =:= maps:get(domains, Connection), + Type =/= out orelse Domain =:= maps:get(server, Connection)], ct:pal("Node = ~p, ConnectionType = ~p, Domain = ~s~nDomainS2SConnections(~p): ~p", [maps:get(node, RPCSpec), Type, Domain, length(DomainS2SConnections), DomainS2SConnections]), @@ -215,6 +218,23 @@ malformed_jid(Config) -> end). +dialback_with_wrong_key(_Config) -> + HostType = domain_helper:host_type(mim), + MimDomain = domain_helper:domain(mim), + FedDomain = domain_helper:domain(fed), + FromTo = {MimDomain, FedDomain}, + Key = <<"123456">>, %% wrong key + StreamId = <<"sdfdsferrr">>, + StartType = {verify, self(), Key, StreamId}, + {ok, _} = rpc(rpc_spec(mim), ejabberd_s2s_out, start, [FromTo, StartType]), + receive + %% Remote server (fed1) rejected out request + {'$gen_event', {validity_from_s2s_out, false, FromTo}} -> + ok + after 5000 -> + ct:fail(timeout) + end. + nonascii_addr(Config) -> escalus:fresh_story(Config, [{alice, 1}, {bob2, 1}], fun(Alice, Bob) -> @@ -422,3 +442,29 @@ get_main_key_and_cert_files(Config) -> get_main_file_path(Config, File) -> filename:join([path_helper:repo_dir(Config), "tools", "ssl", "mongooseim", File]). 
+ +dialback_key_is_synchronized_on_different_nodes(_Config) -> + configure_secret_and_restart_s2s(mim), + configure_secret_and_restart_s2s(mim2), + Key1 = get_shared_secret(mim), + Key2 = get_shared_secret(mim2), + ?assertEqual(Key1, Key2), + %% Node 2 is restarted later, so both nodes should have the key. + ?assertEqual(Key2, {ok, <<"9e438f25e81cf347100b">>}). + +get_shared_secret(NodeKey) -> + HostType = domain_helper:host_type(mim), + rpc(rpc_spec(NodeKey), mongoose_s2s_backend, get_shared_secret, [HostType]). + +set_opt(Spec, Opt, Value) -> + rpc(Spec, mongoose_config, set_opt, [Opt, Value]). + +configure_secret_and_restart_s2s(NodeKey) -> + HostType = domain_helper:host_type(mim), + Spec = rpc_spec(NodeKey), + set_opt(Spec, [{s2s, HostType}, shared], shared_secret(NodeKey)), + ok = rpc(Spec, supervisor, terminate_child, [ejabberd_sup, ejabberd_s2s]), + {ok, _} = rpc(Spec, supervisor, restart_child, [ejabberd_sup, ejabberd_s2s]). + +shared_secret(mim) -> <<"f623e54a0741269be7dd">>; %% Some random key +shared_secret(mim2) -> <<"9e438f25e81cf347100b">>. diff --git a/big_tests/tests/s2s_helper.erl b/big_tests/tests/s2s_helper.erl index 539a83f81b7..69b7c338657 100644 --- a/big_tests/tests/s2s_helper.erl +++ b/big_tests/tests/s2s_helper.erl @@ -1,15 +1,11 @@ -module(s2s_helper). --export([suite/1]). -export([init_s2s/1]). -export([end_s2s/1]). -export([configure_s2s/2]). --import(distributed_helper, [fed/0, mim/0, rpc_spec/1, require_rpc_nodes/1, rpc/4]). +-import(distributed_helper, [rpc_spec/1, rpc/4]). -import(domain_helper, [host_type/1]). -suite(Config) -> - require_rpc_nodes(node_keys()) ++ Config. - init_s2s(Config) -> [{{s2s, NodeKey}, get_s2s_opts(NodeKey)} || NodeKey <- node_keys()] ++ [{escalus_user_db, xmpp} | Config]. 
diff --git a/big_tests/tests/sm_SUITE.erl b/big_tests/tests/sm_SUITE.erl index 99bae20b4f2..e19d4cdea16 100644 --- a/big_tests/tests/sm_SUITE.erl +++ b/big_tests/tests/sm_SUITE.erl @@ -144,7 +144,7 @@ init_per_group(Group, Config) when Group =:= parallel_unacknowledged_message_hoo Group =:= manual_ack_freq_long_session_timeout; Group =:= parallel_manual_ack_freq_1; Group =:= manual_ack_freq_2 -> - dynamic_modules:ensure_modules(host_type(), required_modules(group, Group)), + dynamic_modules:ensure_modules(host_type(), required_modules(Config, group, Group)), Config; init_per_group(stale_h, Config) -> Config; @@ -153,18 +153,18 @@ init_per_group(stream_mgmt_disabled, Config) -> rpc(mim(), mnesia, delete_table, [sm_session]), Config; init_per_group(Group, Config) -> - dynamic_modules:ensure_modules(host_type(), required_modules(group, Group)), + dynamic_modules:ensure_modules(host_type(), required_modules(Config, group, Group)), Config. end_per_group(_Group, _Config) -> ok. init_per_testcase(resume_expired_session_returns_correct_h = CN, Config) -> - dynamic_modules:ensure_modules(host_type(), required_modules(testcase, CN)), + dynamic_modules:ensure_modules(host_type(), required_modules(Config, testcase, CN)), escalus:init_per_testcase(CN, Config); init_per_testcase(CN, Config) when CN =:= gc_repeat_after_never_means_no_cleaning; CN =:= gc_repeat_after_timeout_does_clean -> - dynamic_modules:ensure_modules(host_type(), required_modules(testcase, CN)), + dynamic_modules:ensure_modules(host_type(), required_modules(Config, testcase, CN)), Config2 = register_some_smid_h(Config), escalus:init_per_testcase(CN, Config2); init_per_testcase(server_requests_ack_freq_2 = CN, Config) -> @@ -189,13 +189,14 @@ end_per_testcase(CaseName, Config) -> %% Module configuration per group (in case of stale_h group it is per testcase) -required_modules(Scope, Name) -> +required_modules(Config, Scope, Name) -> SMConfig = case required_sm_opts(Scope, Name) of stopped -> stopped; - 
ExtraOpts -> maps:merge(common_sm_opts(), ExtraOpts) + ExtraOpts -> maps:merge(common_sm_opts(Config), ExtraOpts) end, + Backend = mongoose_helper:mnesia_or_rdbms_backend(), [{mod_stream_management, config_parser_helper:mod_config(mod_stream_management, SMConfig)}, - {mod_offline, config_parser_helper:mod_config(mod_offline, #{})}]. + {mod_offline, config_parser_helper:mod_config(mod_offline, #{backend => Backend})}]. required_sm_opts(group, parallel) -> #{ack_freq => never}; @@ -219,8 +220,9 @@ required_sm_opts(testcase, gc_repeat_after_never_means_no_cleaning) -> required_sm_opts(testcase, gc_repeat_after_timeout_does_clean) -> #{stale_h => stale_h(?SHORT_TIMEOUT, ?SHORT_TIMEOUT)}. -common_sm_opts() -> - #{buffer_max => ?SMALL_SM_BUFFER}. +common_sm_opts(Config) -> + Backend = ct_helper:get_preset_var(Config, stream_management_backend, mnesia), + #{buffer_max => ?SMALL_SM_BUFFER, backend => Backend}. stale_h(RepeatAfter, Geriatric) -> #{enabled => true, diff --git a/big_tests/tests/start_node_id_SUITE.erl b/big_tests/tests/start_node_id_SUITE.erl new file mode 100644 index 00000000000..368e43ec9c6 --- /dev/null +++ b/big_tests/tests/start_node_id_SUITE.erl @@ -0,0 +1,55 @@ +-module(start_node_id_SUITE). +-compile([export_all, nowarn_export_all]). + +-import(distributed_helper, [mim/0, rpc/4]). +-include_lib("common_test/include/ct.hrl"). + +%%-------------------------------------------------------------------- +%% Suite configuration +%%-------------------------------------------------------------------- + +all() -> + [{group, all}]. + +groups() -> + [{all, [], cases()}]. + +cases() -> + [cleaning_works]. + +suite() -> + escalus:suite(). + +%%-------------------------------------------------------------------- +%% Init & teardown +%%-------------------------------------------------------------------- +init_per_suite(Config) -> + escalus:init_per_suite(Config). + +end_per_suite(Config) -> + escalus:end_per_suite(Config). + +init_per_group(_, Config) -> + Config. 
+ +end_per_group(_, Config) -> + Config. + +init_per_testcase(CaseName, Config) -> + escalus:init_per_testcase(CaseName, Config). + +end_per_testcase(CaseName, Config) -> + escalus:end_per_testcase(CaseName, Config). + +%%-------------------------------------------------------------------- +%% Test cases +%%-------------------------------------------------------------------- + +cleaning_works(Config) -> + Id = <<"someid139455">>, + Pid = spawn_link(fun() -> receive stop -> ok end end), + ok = rpc(mim(), mongoose_start_node_id, register_on_remote_node_rpc, [node(), Id, Pid]), + GetF = fun() -> rpc(mim(), mongoose_start_node_id, node_id_to_name, [Id]) end, + mongoose_helper:wait_until(GetF, {ok, node()}), + Pid ! stop, + mongoose_helper:wait_until(GetF, {error, unknown_id}). diff --git a/big_tests/tests/vcard_simple_SUITE.erl b/big_tests/tests/vcard_simple_SUITE.erl index ecf1f9c3894..4ea297e73a5 100644 --- a/big_tests/tests/vcard_simple_SUITE.erl +++ b/big_tests/tests/vcard_simple_SUITE.erl @@ -465,7 +465,8 @@ prepare_vcard_module(Config) -> %% Keep the old config, so we can undo our changes, once finished testing Config1 = dynamic_modules:save_modules(host_types(), Config), %% Get a list of options, we can use as a prototype to start new modules - VCardOpts = config_parser_helper:default_mod_config(mod_vcard), + Backend = mongoose_helper:mnesia_or_rdbms_backend(), + VCardOpts = config_parser_helper:mod_config(mod_vcard, #{backend => Backend}), [{mod_vcard_opts, VCardOpts} | Config1]. 
restore_vcard_module(Config) -> diff --git a/big_tests/tests/xep_0352_csi_SUITE.erl b/big_tests/tests/xep_0352_csi_SUITE.erl index a991a1be68b..6ae55a7b5ce 100644 --- a/big_tests/tests/xep_0352_csi_SUITE.erl +++ b/big_tests/tests/xep_0352_csi_SUITE.erl @@ -37,8 +37,9 @@ suite() -> init_per_suite(Config) -> NewConfig = dynamic_modules:save_modules(host_type(), Config), + Backend = mongoose_helper:mnesia_or_rdbms_backend(), dynamic_modules:ensure_modules( - host_type(), [{mod_offline, default_mod_config(mod_offline)}, + host_type(), [{mod_offline, mod_config(mod_offline, #{backend => Backend})}, {mod_csi, mod_config(mod_csi, #{buffer_max => ?CSI_BUFFER_MAX})}]), [{escalus_user_db, {module, escalus_ejabberd}} | escalus:init_per_suite(NewConfig)]. diff --git a/doc/configuration/configuration-files.md b/doc/configuration/configuration-files.md index 35712ed2021..75b4e6ed366 100644 --- a/doc/configuration/configuration-files.md +++ b/doc/configuration/configuration-files.md @@ -15,6 +15,7 @@ The file is divided into the following sections: * [**general**](general.md) - Served XMPP domains, log level, server language and some other miscellaneous settings. * [**listen**](listen.md) - Configured listeners, receiving incoming XMPP and HTTP connections. * [**auth**](auth.md) - Supported client authentication methods and their options. +* [**internal_databases**](internal-databases.md) - Options for Mnesia and CETS. They are primarily used for clustering. * [**outgoing_pools**](outgoing-connections.md) - Outgoing connections to external services, including databases, message queues and HTTP services. * [**services**](Services.md) - Internal services like an administration API and system metrics. * [**modules**](Modules.md) - [XMPP extension](https://xmpp.org/extensions/) modules, which extend the basic functionality provided by XMPP. 
diff --git a/doc/configuration/general.md b/doc/configuration/general.md index ebddbdfc65c..a2ab620bbf7 100644 --- a/doc/configuration/general.md +++ b/doc/configuration/general.md @@ -142,7 +142,7 @@ According to RFC 6210, even when a client sends invalid data after opening a con These options can be used to configure the way MongooseIM manages user sessions. ### `general.sm_backend` -* **Syntax:** string, `"mnesia"` or `"redis"` +* **Syntax:** string, `"mnesia"`, `"cets"` or `"redis"` * **Default:** `"mnesia"` * **Example:** `sm_backend = "redis"` @@ -157,6 +157,15 @@ See the section about [redis connection setup](./outgoing-connections.md#redis-s When a user's session is replaced (due to a full JID conflict) by a new one, this parameter specifies the time MongooseIM waits for the old sessions to close. The default value is sufficient in most cases. If you observe `replaced_wait_timeout` warning in logs, then most probably the old sessions are frozen for some reason and it should be investigated. +## XMPP federation (S2S) + +### `general.s2s_backend` +* **Syntax:** string, `"mnesia"`, `"cets"` +* **Default:** `"mnesia"` +* **Example:** `s2s_backend = "cets"` + +Backend for replicating the list of outgoing Server to Server (S2S) connections across the nodes of the local MongooseIM cluster. + ## Message routing The following options influence the way MongooseIM routes incoming messages to their recipients. diff --git a/doc/configuration/internal-databases.md b/doc/configuration/internal-databases.md new file mode 100644 index 00000000000..91435a9e627 --- /dev/null +++ b/doc/configuration/internal-databases.md @@ -0,0 +1,67 @@ +Internal databases are used to cluster MongooseIM nodes, and to replicate session list data between them. + +Mnesia is a legacy way to cluster MongooseIM nodes. It could also be used to store persistent data, but we recommend +using RDBMS databases instead for scalability and stability reasons.
 + +CETS is a new way to cluster MongooseIM nodes. +CETS needs to know a list of nodes for the node discovery. There are two ways to get a list of nodes: + +- A text file with a list of nodes on each line. It is useful when there is an external script to make this file based on + some custom logic (for example, a bash script that uses AWS CLI to discover instances in the autoscaling group). This file + would be automatically reread on change. +- RDBMS database. MongooseIM would write into RDBMS its nodename and read a list of other nodes. It is pretty simple, but + RDBMS database could be a single point of failure. + +Section example: + +```toml +[internal_databases] + [internal_databases.mnesia] + + [internal_databases.cets] + backend = "rdbms" + cluster_name = "mongooseim" +``` + +or + +```toml +[internal_databases] + [internal_databases.cets] + backend = "file" + node_list_file = "cets_disco.txt" +``` + +To enable just CETS, define only the `internal_databases.cets` section: + +```toml +[internal_databases] + [internal_databases.cets] +``` + +# CETS Options + +### `internal_databases.cets.backend` + +Backend for CETS discovery. + +* **Syntax:** string, one of `"rdbms"`, `"file"`. +* **Default:** `"rdbms"` +* **Example:** `backend = "rdbms"` + +### `internal_databases.cets.cluster_name` + +Namespace for the cluster. Only nodes with the same cluster name would be discovered. This option is for the RDBMS backend. + +* **Syntax:** string. +* **Default:** `"mongooseim"` +* **Example:** `cluster_name = "mongooseim"` + +### `internal_databases.cets.node_list_file` + +File to read a list of nodes from. Relative to the MongooseIM release directory. This option is for the file backend. +Required if `backend = "file"`. + +* **Syntax:** path. +* **Default:** not specified.
+* **Example:** `node_list_file = "/etc/mim_nodes.txt"` diff --git a/doc/configuration/release-options.md b/doc/configuration/release-options.md index 3b99b1097f6..22937368cda 100644 --- a/doc/configuration/release-options.md +++ b/doc/configuration/release-options.md @@ -197,6 +197,13 @@ These options are inserted into the `rel/files/mongooseim.toml` template. * **Syntax:** string * **Example:** `{sm_backend, "\"redis\""}.` +### s2s_backend + +* **Type:** parameter +* **Option:** [`general.s2s_backend`](general.md#generals2s_backend) +* **Syntax:** string +* **Example:** `{s2s_backend, "\"mnesia\""}.` + ### tls_config * **Type:** block diff --git a/doc/modules/mod_bosh.md b/doc/modules/mod_bosh.md index d04aadab07c..935a56e4014 100644 --- a/doc/modules/mod_bosh.md +++ b/doc/modules/mod_bosh.md @@ -13,7 +13,7 @@ If you want to use BOSH, you must enable it both in the `listen` section of * **Default:** `"mnesia"` * **Example:** `backend = "mnesia"` -Backend to use for storing BOSH connections. Currently only `"mnesia"` is supported. +Backend to use for storing BOSH connections. `"cets"`, `"mnesia"` are supported. ### `modules.mod_bosh.inactivity` * **Syntax:** positive integer or the string `"infinity"` diff --git a/doc/modules/mod_jingle_sip.md b/doc/modules/mod_jingle_sip.md index fcf093eb90e..5100e5b642c 100644 --- a/doc/modules/mod_jingle_sip.md +++ b/doc/modules/mod_jingle_sip.md @@ -102,6 +102,13 @@ MongooseIM packages are built with Jingle/SIP support. ## Options +### `modules.mod_jingle_sip.backend` +* **Syntax:** string, `"mnesia"`, `"cets"` +* **Default:** `"mnesia"` +* **Example:** `backend = "cets"` + +Backend for in-memory data for this module. 
 + ### `modules.mod_jingle_sip.proxy_host` * **Syntax:** string * **Default:** `"localhost"` diff --git a/include/mod_jingle_sip_session.hrl b/include/mod_jingle_sip_session.hrl new file mode 100644 index 00000000000..ed16350cdc7 --- /dev/null +++ b/include/mod_jingle_sip_session.hrl @@ -0,0 +1,15 @@ +%% Defines a record to store SIP session information +%% Type is in mod_jingle_sip_session:session() +-record(jingle_sip_session, { + %% SIP CallID + sid, + dialog, + state, + direction, + request, + node, + owner, + from, + to, + now, + meta}). diff --git a/include/mod_muc.hrl b/include/mod_muc.hrl index 45d5a6ade7b..ff8b2f99f5a 100644 --- a/include/mod_muc.hrl +++ b/include/mod_muc.hrl @@ -7,8 +7,3 @@ host_type, pid }). - --record(muc_registered, { - us_host, - nick - }). diff --git a/mkdocs.yml b/mkdocs.yml index 232d24249e4..c3cecf13b94 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -71,6 +71,7 @@ nav: - 'Options: General': 'configuration/general.md' - 'Options: Listen': 'configuration/listen.md' - 'Options: Auth': 'configuration/auth.md' + - 'Options: Internal Databases': 'configuration/internal-databases.md' - 'Options: Outgoing connections': 'configuration/outgoing-connections.md' - 'Options: Services': 'configuration/Services.md' - 'Options: Extension Modules': 'configuration/Modules.md' diff --git a/priv/graphql/schemas/admin/admin_schema.gql b/priv/graphql/schemas/admin/admin_schema.gql index 6f55d867275..59520901e49 100644 --- a/priv/graphql/schemas/admin/admin_schema.gql +++ b/priv/graphql/schemas/admin/admin_schema.gql @@ -39,6 +39,8 @@ type AdminQuery{ gdpr: GdprAdminQuery "Mnesia internal database management" mnesia: MnesiaAdminQuery + "CETS internal database management" + cets: CETSAdminQuery "Server info and management" server: ServerAdminQuery } diff --git a/priv/graphql/schemas/admin/cets.gql b/priv/graphql/schemas/admin/cets.gql new file mode 100644 index 00000000000..f9a8f8e74e4 --- /dev/null +++ b/priv/graphql/schemas/admin/cets.gql @@ -0,0 +1,17 @@
+"Allow admin to get information about CETS status" +type CETSAdminQuery @protected{ + "Get from the local node. Only for global admin" + systemInfo: [CETSInfo] + @protected(type: GLOBAL) +} + +type CETSInfo { + "ETS table name" + tableName: String + "Memory (in words)" + memory: Int + "Size (in records)" + size: Int + "A list of clustered nodes" + nodes: [String] +} diff --git a/priv/mssql2012.sql b/priv/mssql2012.sql index 351939358f7..92df0320e53 100644 --- a/priv/mssql2012.sql +++ b/priv/mssql2012.sql @@ -752,3 +752,12 @@ CREATE TABLE domain_events ( domain VARCHAR(250) NOT NULL ); CREATE INDEX i_domain_events_domain ON domain_events(domain); + +CREATE TABLE discovery_nodes ( + node_name varchar(250), + cluster_name varchar(250), + updated_timestamp BIGINT NOT NULL, -- in microseconds + node_num INT NOT NULL, + PRIMARY KEY (cluster_name, node_name) +); +CREATE UNIQUE INDEX i_discovery_nodes_node_num ON discovery_nodes(cluster_name, node_num); diff --git a/priv/mysql.sql b/priv/mysql.sql index f3f7ed00023..392dd0df636 100644 --- a/priv/mysql.sql +++ b/priv/mysql.sql @@ -544,3 +544,12 @@ CREATE TABLE domain_events ( domain VARCHAR(250) NOT NULL ); CREATE INDEX i_domain_events_domain ON domain_events(domain); + +CREATE TABLE discovery_nodes ( + node_name varchar(250), + cluster_name varchar(250), + updated_timestamp BIGINT NOT NULL, -- in microseconds + node_num INT UNSIGNED NOT NULL, + PRIMARY KEY (cluster_name, node_name) +); +CREATE UNIQUE INDEX i_discovery_nodes_node_num USING BTREE ON discovery_nodes(cluster_name, node_num); diff --git a/priv/pg.sql b/priv/pg.sql index d0525f57c84..58f64ec30ae 100644 --- a/priv/pg.sql +++ b/priv/pg.sql @@ -504,3 +504,12 @@ CREATE TABLE domain_events ( PRIMARY KEY(id) ); CREATE INDEX i_domain_events_domain ON domain_events(domain); + +CREATE TABLE discovery_nodes ( + node_name varchar(250), + cluster_name varchar(250), + updated_timestamp BIGINT NOT NULL, -- in microseconds + node_num INT NOT NULL, + PRIMARY KEY (cluster_name, 
node_name) +); +CREATE UNIQUE INDEX i_discovery_nodes_node_num ON discovery_nodes USING BTREE(cluster_name, node_num); diff --git a/rebar.config b/rebar.config index b2e4dd6b45c..080e81f6c49 100644 --- a/rebar.config +++ b/rebar.config @@ -80,6 +80,7 @@ {cache_tab, "1.0.30"}, {segmented_cache, "0.3.0"}, {worker_pool, "6.0.1"}, + {cets, {git, "https://github.com/esl/cets.git", {branch, "main"}}}, %%% HTTP tools {graphql, {git, "https://github.com/esl/graphql-erlang.git", {branch, "master"}}}, @@ -168,11 +169,17 @@ {erl_opts, [{d, 'PROD_NODE'}]} ]}, %% development nodes {mim1, [{relx, [ {overlay_vars, "rel/mim1.vars-toml.config"}, - {overlay, [{template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, + {overlay, [ + {copy, "rel/files/cets_disco.txt", "etc/cets_disco.txt"}, + {template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, {mim2, [{relx, [ {overlay_vars, "rel/mim2.vars-toml.config"}, - {overlay, [{template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, + {overlay, [ + {copy, "rel/files/cets_disco.txt", "etc/cets_disco.txt"}, + {template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, {mim3, [{relx, [ {overlay_vars, "rel/mim3.vars-toml.config"}, - {overlay, [{template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, + {overlay, [ + {copy, "rel/files/cets_disco.txt", "etc/cets_disco.txt"}, + {template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, {fed1, [{relx, [ {overlay_vars, "rel/fed1.vars-toml.config"}, {overlay, [{template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, {reg1, [{relx, [ {overlay_vars, "rel/reg1.vars-toml.config"}, diff --git a/rebar.lock b/rebar.lock index 68ad0062ab5..a2e9b3955c2 100644 --- a/rebar.lock +++ b/rebar.lock @@ -6,6 +6,10 @@ {<<"bear">>,{pkg,<<"bear">>,<<"1.0.0">>},1}, {<<"cache_tab">>,{pkg,<<"cache_tab">>,<<"1.0.30">>},0}, {<<"certifi">>,{pkg,<<"certifi">>,<<"2.9.0">>},1}, + {<<"cets">>, + {git,"https://github.com/esl/cets.git", + 
{ref,"9965e3b35f3776dff5879effaab538a0ab94592d"}}, + 0}, {<<"cowboy">>,{pkg,<<"cowboy">>,<<"2.9.0">>},0}, {<<"cowboy_swagger">>,{pkg,<<"cowboy_swagger">>,<<"2.5.1">>},0}, {<<"cowlib">>,{pkg,<<"cowlib">>,<<"2.11.0">>},1}, diff --git a/rel/fed1.vars-toml.config b/rel/fed1.vars-toml.config index b6e34e8d401..34c4b2420f3 100644 --- a/rel/fed1.vars-toml.config +++ b/rel/fed1.vars-toml.config @@ -17,6 +17,7 @@ %% "localhost" host should NOT be defined. {hosts, "\"fed1\""}. {default_server_domain, "\"fed1\""}. +{cluster_name, "fed"}. %% domain.example.com is for multitenancy preset, muc_SUITE:register_over_s2s {s2s_addr, "[[s2s.address]] diff --git a/rel/files/cets_disco.txt b/rel/files/cets_disco.txt new file mode 100644 index 00000000000..428fc58a86b --- /dev/null +++ b/rel/files/cets_disco.txt @@ -0,0 +1,3 @@ +mongooseim@localhost +ejabberd2@localhost +mongooseim3@localhost diff --git a/rel/files/mongooseim.toml b/rel/files/mongooseim.toml index eb6bc696879..dc1b652b99d 100644 --- a/rel/files/mongooseim.toml +++ b/rel/files/mongooseim.toml @@ -11,6 +11,12 @@ {{#sm_backend}} sm_backend = {{{sm_backend}}} {{/sm_backend}} + {{#component_backend}} + component_backend = {{{component_backend}}} + {{/component_backend}} + {{#s2s_backend}} + s2s_backend = {{{s2s_backend}}} + {{/s2s_backend}} max_fsm_queue = 1000 {{#http_server_name}} http_server_name = {{{http_server_name}}} @@ -162,6 +168,8 @@ {{{auth_method_opts}}}{{/auth_method_opts}} {{/auth_method}} +{{{internal_databases}}} + {{#outgoing_pools}} {{{outgoing_pools}}} {{/outgoing_pools}} @@ -244,6 +252,9 @@ {{{mod_vcard}}} {{/mod_vcard}} [modules.mod_bosh] + {{#bosh_backend}} + backend = {{{bosh_backend}}} + {{/bosh_backend}} [modules.mod_carboncopy] diff --git a/rel/mim1.vars-toml.config b/rel/mim1.vars-toml.config index 5fcea75ba4c..0cf2e6bbd8c 100644 --- a/rel/mim1.vars-toml.config +++ b/rel/mim1.vars-toml.config @@ -20,6 +20,7 @@ {hosts, "\"localhost\", \"anonymous.localhost\", \"localhost.bis\""}. 
{host_types, "\"test type\", \"dummy auth\", \"anonymous\""}. {default_server_domain, "\"localhost\""}. +{cluster_name, "mim"}. {mod_amp, ""}. {host_config, diff --git a/rel/mim2.vars-toml.config b/rel/mim2.vars-toml.config index 2581a209a45..758de03b341 100644 --- a/rel/mim2.vars-toml.config +++ b/rel/mim2.vars-toml.config @@ -18,6 +18,7 @@ {hosts, "\"localhost\", \"anonymous.localhost\", \"localhost.bis\""}. {host_types, "\"test type\", \"dummy auth\""}. {default_server_domain, "\"localhost\""}. +{cluster_name, "mim"}. {s2s_addr, "[[s2s.address]] host = \"localhost2\" ip_address = \"127.0.0.1\""}. diff --git a/rel/mim3.vars-toml.config b/rel/mim3.vars-toml.config index 645ea41e1cc..6e758440aa6 100644 --- a/rel/mim3.vars-toml.config +++ b/rel/mim3.vars-toml.config @@ -20,6 +20,7 @@ {hosts, "\"localhost\", \"anonymous.localhost\", \"localhost.bis\""}. {default_server_domain, "\"localhost\""}. +{cluster_name, "mim"}. {s2s_addr, "[[s2s.address]] host = \"localhost2\" diff --git a/rel/reg1.vars-toml.config b/rel/reg1.vars-toml.config index 4b5a4e4fea7..da485e138c5 100644 --- a/rel/reg1.vars-toml.config +++ b/rel/reg1.vars-toml.config @@ -21,6 +21,8 @@ %% "reg1" is a local host. {hosts, "\"reg1\", \"localhost\""}. {default_server_domain, "\"reg1\""}. +{cluster_name, "reg"}. + {s2s_addr, "[[s2s.address]] host = \"localhost\" ip_address = \"127.0.0.1\" diff --git a/rel/vars-toml.config b/rel/vars-toml.config index d17f75d1fcb..4d5e1b235b1 100644 --- a/rel/vars-toml.config +++ b/rel/vars-toml.config @@ -17,6 +17,8 @@ {http_api_client_endpoint, "port = {{ http_api_client_endpoint_port }}"}. {s2s_use_starttls, "\"optional\""}. {s2s_certfile, "\"priv/ssl/fake_server.pem\""}. +{internal_databases, "[internal_databases] + [internal_databases.mnesia]"}. "./configure.vars.config". 
diff --git a/src/cert_utils.erl b/src/cert_utils.erl index 4f9eed4bd5a..bc9724ba088 100644 --- a/src/cert_utils.erl +++ b/src/cert_utils.erl @@ -97,7 +97,7 @@ get_lserver_from_addr(V, UTF8) when is_binary(V); is_list(V) -> Val = convert_to_bin(V), case {jid:from_binary(Val), UTF8} of {#jid{luser = <<"">>, lserver = LD, lresource = <<"">>}, true} -> - case ejabberd_s2s:domain_utf8_to_ascii(LD) of + case mongoose_s2s_lib:domain_utf8_to_ascii(LD) of false -> []; PCLD -> [PCLD] end; diff --git a/src/component/mongoose_component.erl b/src/component/mongoose_component.erl new file mode 100644 index 00000000000..763436d7b40 --- /dev/null +++ b/src/component/mongoose_component.erl @@ -0,0 +1,173 @@ +-module(mongoose_component). +%% API +-export([has_component/1, + dirty_get_all_components/1, + register_components/4, + unregister_components/1, + lookup_component/1, + lookup_component/2]). + +-export([start/0, stop/0]). +-export([node_cleanup/3]). + +-include("mongoose.hrl"). +-include("jlib.hrl"). +-include("external_component.hrl"). + +-type domain() :: jid:server(). + +-type external_component() :: #external_component{domain :: domain(), + handler :: mongoose_packet_handler:t(), + node :: node(), + is_hidden :: boolean()}. + +-export_type([external_component/0]). + +% Not simple boolean() because is probably going to support third value in the future: only_hidden. +% Besides, it increases readability. +-type return_hidden() :: only_public | all. + +-export_type([return_hidden/0]). + +%%==================================================================== +%% API +%%==================================================================== + +start() -> + Backend = mongoose_config:get_opt(component_backend), + mongoose_component_backend:init(#{backend => Backend}), + gen_hook:add_handlers(hooks()). + +stop() -> + gen_hook:delete_handlers(hooks()). + +-spec hooks() -> [gen_hook:hook_tuple()]. +hooks() -> + [{node_cleanup, global, fun ?MODULE:node_cleanup/3, #{}, 90}]. 
+ +-spec register_components(Domain :: [domain()], + Node :: node(), + Handler :: mongoose_packet_handler:t(), + AreHidden :: boolean()) -> {ok, [external_component()]} | {error, any()}. +register_components(Domains, Node, Handler, AreHidden) -> + try + register_components_unsafe(Domains, Node, Handler, AreHidden) + catch Class:Reason:Stacktrace -> + ?LOG_ERROR(#{what => component_register_failed, + class => Class, reason => Reason, stacktrace => Stacktrace}), + {error, Reason} + end. + +register_components_unsafe(Domains, Node, Handler, AreHidden) -> + LDomains = prepare_ldomains(Domains), + Components = make_components(LDomains, Node, Handler, AreHidden), + assert_can_register_components(Components), + register_components(Components), + %% We do it outside of Mnesia transaction + lists:foreach(fun run_register_hook/1, Components), + {ok, Components}. + +register_components(Components) -> + mongoose_component_backend:register_components(Components). + +make_components(LDomains, Node, Handler, AreHidden) -> + [make_record_component(LDomain, Handler, Node, AreHidden) || LDomain <- LDomains]. + +make_record_component(LDomain, Handler, Node, IsHidden) -> + #external_component{domain = LDomain, handler = Handler, + node = Node, is_hidden = IsHidden}. + +run_register_hook(#external_component{domain = LDomain, is_hidden = IsHidden}) -> + mongoose_hooks:register_subhost(LDomain, IsHidden), + ok. + +run_unregister_hook(#external_component{domain = LDomain}) -> + mongoose_hooks:unregister_subhost(LDomain), + ok. + +-spec unregister_components(Components :: [external_component()]) -> ok. +unregister_components(Components) -> + lists:foreach(fun run_unregister_hook/1, Components), + mongoose_component_backend:unregister_components(Components). 
+ +assert_can_register_components(Components) -> + ConflictComponents = lists:filter(fun is_already_registered/1, Components), + ConflictDomains = records_to_domains(ConflictComponents), + case ConflictDomains of + [] -> + ok; + _ -> + error({routes_already_exist, ConflictDomains}) + end. + +records_to_domains(Components) -> + [LDomain || #external_component{domain = LDomain} <- Components]. + +%% Returns true if any component route is registered for the domain. +-spec has_component(jid:lserver()) -> boolean(). +has_component(Domain) -> + [] =/= lookup_component(Domain). + +%% @doc Check if the component/route is already registered somewhere. +-spec is_already_registered(external_component()) -> boolean(). +is_already_registered(#external_component{domain = LDomain, node = Node}) -> + has_dynamic_domains(LDomain) + orelse has_domain_route(LDomain) + orelse has_component_registered(LDomain, Node). + +has_dynamic_domains(LDomain) -> + {error, not_found} =/= mongoose_domain_api:get_host_type(LDomain). + +%% check that route for this domain is not already registered +has_domain_route(LDomain) -> + no_route =/= mongoose_router:lookup_route(LDomain). + +%% check that there is no component registered globally for this node +has_component_registered(LDomain, Node) -> + no_route =/= get_component(LDomain, Node). + +%% Find a component registered globally for this node (internal use) +get_component(LDomain, Node) -> + filter_component(lookup_component(LDomain), Node). + +filter_component([], _) -> + no_route; +filter_component([Comp|Tail], Node) -> + case Comp of + #external_component{node = Node} -> + Comp; + _ -> + filter_component(Tail, Node) + end. + +%% @doc Returns a list of components registered for this domain by any node, +%% the choice is yours. +-spec lookup_component(Domain :: jid:lserver()) -> [external_component()]. +lookup_component(Domain) -> + mongoose_component_backend:lookup_component(Domain). 
+ +%% @doc Returns a list of components registered for this domain at the given node. +%% (must be only one, or nothing) +-spec lookup_component(Domain :: jid:lserver(), Node :: node()) -> [external_component()]. +lookup_component(Domain, Node) -> + mongoose_component_backend:lookup_component(Domain, Node). + +-spec dirty_get_all_components(return_hidden()) -> [jid:lserver()]. +dirty_get_all_components(ReturnHidden) -> + mongoose_component_backend:get_all_components(ReturnHidden). + +-spec node_cleanup(map(), map(), map()) -> {ok, map()}. +node_cleanup(Acc, #{node := Node}, _) -> + mongoose_component_backend:node_cleanup(Node), + {ok, maps:put(?MODULE, ok, Acc)}. + +prepare_ldomains(Domains) -> + LDomains = [jid:nameprep(Domain) || Domain <- Domains], + Zip = lists:zip(Domains, LDomains), + InvalidDomains = [Domain || {Domain, error} <- Zip], + case InvalidDomains of + [] -> + LDomains; + _ -> + error({invalid_domains, InvalidDomains}) + end. diff --git a/src/component/mongoose_component_backend.erl b/src/component/mongoose_component_backend.erl new file mode 100644 index 00000000000..f482147892c --- /dev/null +++ b/src/component/mongoose_component_backend.erl @@ -0,0 +1,65 @@ +-module(mongoose_component_backend). + +-type external_component() :: mongoose_component:external_component(). + +-callback init(map()) -> any(). + +-callback node_cleanup(node()) -> ok. + +-callback register_components(Components :: [external_component()]) -> ok. + +-callback unregister_components(Components :: [external_component()]) -> ok. + +-callback lookup_component(Domain :: jid:lserver()) -> [external_component()]. + +-callback lookup_component(Domain :: jid:lserver(), Node :: node()) -> [external_component()]. + +-callback get_all_components(ReturnHidden :: mongoose_component:return_hidden()) -> [jid:lserver()]. + +-export([init/1, + node_cleanup/1, + register_components/1, + unregister_components/1, + lookup_component/1, + lookup_component/2, + get_all_components/1]). 
+ +-ignore_xref([behaviour_info/1]). + +-define(MAIN_MODULE, mongoose_component). + +-spec init(map()) -> any(). +init(Opts) -> + Args = [Opts], + mongoose_backend:init(global, ?MAIN_MODULE, [], Opts), + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +-spec node_cleanup(node()) -> ok. +node_cleanup(Node) -> + Args = [Node], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +-spec register_components(Components :: [external_component()]) -> ok. +register_components(Components) -> + Args = [Components], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +-spec unregister_components(Components :: [external_component()]) -> ok. +unregister_components(Components) -> + Args = [Components], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +-spec lookup_component(Domain :: jid:lserver()) -> [external_component()]. +lookup_component(Domain) -> + Args = [Domain], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +-spec lookup_component(Domain :: jid:lserver(), Node :: node()) -> [external_component()]. +lookup_component(Domain, Node) -> + Args = [Domain, Node], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +-spec get_all_components(ReturnHidden :: mongoose_component:return_hidden()) -> [jid:lserver()]. +get_all_components(ReturnHidden) -> + Args = [ReturnHidden], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). diff --git a/src/component/mongoose_component_cets.erl b/src/component/mongoose_component_cets.erl new file mode 100644 index 00000000000..06ef5670076 --- /dev/null +++ b/src/component/mongoose_component_cets.erl @@ -0,0 +1,42 @@ +-module(mongoose_component_cets). +-behaviour(mongoose_component_backend). + +-export([init/1, + node_cleanup/1, + register_components/1, + unregister_components/1, + lookup_component/1, + lookup_component/2, + get_all_components/1]). + +-include("external_component.hrl"). 
+-define(TABLE, cets_external_component).
+
+init(_) ->
+    cets:start(?TABLE, #{type => bag, keypos => 2}),
+    cets_discovery:add_table(mongoose_cets_discovery, ?TABLE).
+
+node_cleanup(Node) ->
+    ets:match_delete(?TABLE, #external_component{node = Node, _ = '_'}),
+    ok.
+
+register_components(Components) ->
+    cets:insert_many(?TABLE, Components),
+    ok.
+
+unregister_components(Components) ->
+    cets:delete_objects(?TABLE, Components),
+    ok.
+
+lookup_component(Domain) ->
+    ets:lookup(?TABLE, Domain).
+
+lookup_component(Domain, Node) ->
+    ets:match_object(?TABLE, #external_component{domain = Domain, node = Node, _ = '_'}).
+
+get_all_components(all) ->
+    MatchAll = {#external_component{ domain = '$1', _ = '_' }, [], ['$1']},
+    ets:select(?TABLE, [MatchAll]);
+get_all_components(only_public) ->
+    MatchNonHidden = {#external_component{ domain = '$1', is_hidden = false, _ = '_' }, [], ['$1']},
+    ets:select(?TABLE, [MatchNonHidden]).
diff --git a/src/component/mongoose_component_mnesia.erl b/src/component/mongoose_component_mnesia.erl
new file mode 100644
index 00000000000..7911570248a
--- /dev/null
+++ b/src/component/mongoose_component_mnesia.erl
@@ -0,0 +1,67 @@
+-module(mongoose_component_mnesia).
+-behaviour(mongoose_component_backend).
+
+-export([init/1,
+         node_cleanup/1,
+         register_components/1,
+         unregister_components/1,
+         lookup_component/1,
+         lookup_component/2,
+         get_all_components/1]).
+
+-include("external_component.hrl").
+
+init(_) ->
+    update_tables(),
+    %% add distributed service_component routes
+    mnesia:create_table(external_component,
+                        [{attributes, record_info(fields, external_component)},
+                         {type, bag}, {ram_copies, [node()]}]),
+    mnesia:add_table_copy(external_component, node(), ram_copies).
+
+update_tables() ->
+    %% delete old schema
+    case catch mnesia:table_info(external_component, local_content) of
+        true ->
+            mnesia:delete_table(external_component);
+        _ ->
+            ok
+    end.
+ +node_cleanup(Node) -> + Entries = mnesia:dirty_match_object(external_component, + #external_component{node = Node, _ = '_'}), + [mnesia:dirty_delete_object(external_component, Entry) || Entry <- Entries], + ok. + +register_components(Components) -> + F = fun() -> + lists:foreach(fun mnesia:write/1, Components) + end, + case mnesia:transaction(F) of + {atomic, ok} -> ok; + {aborted, Reason} -> error({mnesia_aborted_write, Reason}) + end. + +unregister_components(Components) -> + F = fun() -> + lists:foreach(fun do_unregister_component/1, Components) + end, + {atomic, ok} = mnesia:transaction(F), + ok. + +do_unregister_component(Component) -> + ok = mnesia:delete_object(external_component, Component, write). + +lookup_component(Domain) -> + mnesia:dirty_read(external_component, Domain). + +lookup_component(Domain, Node) -> + mnesia:dirty_match_object(external_component, + #external_component{domain = Domain, node = Node, _ = '_'}). + +get_all_components(all) -> + mnesia:dirty_all_keys(external_component); +get_all_components(only_public) -> + MatchNonHidden = {#external_component{ domain = '$1', is_hidden = false, _ = '_' }, [], ['$1']}, + mnesia:dirty_select(external_component, [MatchNonHidden]). 
diff --git a/src/config/mongoose_config_spec.erl b/src/config/mongoose_config_spec.erl index a81061b11ff..13d963d2b64 100644 --- a/src/config/mongoose_config_spec.erl +++ b/src/config/mongoose_config_spec.erl @@ -90,6 +90,7 @@ root() -> <<"listen">> => Listen#section{include = always}, <<"auth">> => Auth#section{include = always}, <<"outgoing_pools">> => outgoing_pools(), + <<"internal_databases">> => internal_databases(), <<"services">> => services(), <<"modules">> => Modules#section{include = always}, <<"shaper">> => shaper(), @@ -170,6 +171,12 @@ general() -> <<"sm_backend">> => #option{type = atom, validate = {module, ejabberd_sm}, wrap = global_config}, + <<"component_backend">> => #option{type = atom, + validate = {module, mongoose_component}, + wrap = global_config}, + <<"s2s_backend">> => #option{type = atom, + validate = {module, mongoose_s2s}, + wrap = global_config}, <<"max_fsm_queue">> => #option{type = integer, validate = positive, wrap = global_config}, @@ -209,6 +216,8 @@ general_defaults() -> <<"language">> => <<"en">>, <<"all_metrics_are_global">> => false, <<"sm_backend">> => mnesia, + <<"component_backend">> => mnesia, + <<"s2s_backend">> => mnesia, <<"rdbms_server_type">> => generic, <<"mongooseimctl_access_commands">> => #{}, <<"routing_modules">> => mongoose_router:default_routing_modules(), @@ -428,6 +437,32 @@ auth_password() -> include = always }. +%% path: internal_databases +internal_databases() -> + Items = #{<<"cets">> => internal_database_cets(), + <<"mnesia">> => internal_database_mnesia()}, + #section{items = Items, + format_items = map, + wrap = global_config, + include = always}. 
+ +%% path: internal_databases.*.* +internal_database_cets() -> + #section{ + items = #{<<"backend">> => #option{type = atom, + validate = {enum, [file, rdbms]}}, + <<"cluster_name">> => #option{type = atom, validate = non_empty}, + %% Relative to the release directory (or an absolute name) + <<"node_list_file">> => #option{type = string, + validate = filename} + }, + defaults = #{<<"backend">> => rdbms, <<"cluster_name">> => mongooseim} + }. + +%% path: internal_databases.*.* +internal_database_mnesia() -> + #section{}. + %% path: outgoing_pools outgoing_pools() -> PoolTypes = [<<"cassandra">>, <<"elastic">>, <<"http">>, <<"ldap">>, diff --git a/src/ejabberd_app.erl b/src/ejabberd_app.erl index ae5dbee40a3..c1ede1526be 100644 --- a/src/ejabberd_app.erl +++ b/src/ejabberd_app.erl @@ -40,16 +40,27 @@ %%% start(normal, _Args) -> + try + do_start() + catch Class:Reason:StackTrace -> + %% Log a stacktrace because while proc_lib:crash_report/4 would report a crash reason, + %% it would not report the stacktrace + ?LOG_CRITICAL(#{what => app_failed_to_start, + class => Class, reason => Reason, stacktrace => StackTrace}), + erlang:raise(Class, Reason, StackTrace) + end; +start(_, _) -> + {error, badarg}. + +do_start() -> mongoose_fips:notify(), write_pid_file(), update_status_file(starting), mongoose_config:start(), mongoose_metrics:init(), db_init(), - mongoose_graphql:init(), translate:start(), - ejabberd_node_id:start(), ejabberd_commands:init(), mongoose_graphql_commands:start(), mongoose_router:start(), @@ -71,9 +82,7 @@ start(normal, _Args) -> mongoose_metrics:init_mongooseim_metrics(), update_status_file(started), ?LOG_NOTICE(#{what => mongooseim_node_started, version => ?MONGOOSE_VERSION, node => node()}), - Sup; -start(_, _) -> - {error, badarg}. + Sup. %% @doc Prepare the application for termination. 
%% This function is called when an application is about to be stopped,
@@ -96,6 +105,9 @@ stop(_State) ->
     ?LOG_NOTICE(#{what => mongooseim_node_stopped, version => ?MONGOOSE_VERSION, node => node()}),
     delete_pid_file(),
     update_status_file(stopped),
+    %% We cannot stop other applications inside of the stop callback
+    %% (because we would deadlock the application controller process).
+    %% That is why we call mnesia:stop() inside of db_init_mnesia() instead.
     ok.


@@ -103,14 +115,25 @@
%%% Internal functions
%%%
 db_init() ->
+    case mongoose_config:lookup_opt([internal_databases, mnesia]) of
+        {ok, _} ->
+            db_init_mnesia(),
+            mongoose_node_num_mnesia:init();
+        {error, _} ->
+            ok
+    end.
+
+db_init_mnesia() ->
+    %% Mnesia should not be running at this point, unless it is started by tests.
+    %% Ensure Mnesia is stopped
+    mnesia:stop(),
     case mnesia:system_info(extra_db_nodes) of
         [] ->
-            application:stop(mnesia),
-            mnesia:create_schema([node()]),
-            application:start(mnesia, permanent);
+            mnesia:create_schema([node()]);
         _ ->
             ok
     end,
+    application:start(mnesia, permanent),
     mnesia:wait_for_tables(mnesia:system_info(local_tables), infinity).

 -spec broadcast_c2s_shutdown_listeners() -> ok.
diff --git a/src/ejabberd_local.erl b/src/ejabberd_local.erl
index d7b7787c114..41dc2cbf732 100644
--- a/src/ejabberd_local.erl
+++ b/src/ejabberd_local.erl
@@ -423,23 +423,21 @@ do_unregister_host(Host) ->
 make_iq_id() ->
     %% Attach NodeId, so we know to which node to forward the response
-    {ok, NodeId} = ejabberd_node_id:node_id(),
+    BinNodeId = mongoose_start_node_id:node_id(),
     Rand = mongoose_bin:gen_from_crypto(),
-    <<(integer_to_binary(NodeId))/binary, "_", Rand/binary>>.
+    <<BinNodeId/binary, "_", Rand/binary>>.

 %% Parses ID, made by make_iq_id function
 -spec parse_iq_id(ID :: binary()) -> local_node
                                    | {remote_node, node()}
                                    | {error, {unknown_node_id, term()} | bad_iq_format}.
parse_iq_id(ID) -> - {ok, NodeId} = ejabberd_node_id:node_id(), - BinNodeId = integer_to_binary(NodeId), + BinNodeId = mongoose_start_node_id:node_id(), case binary:split(ID, <<"_">>) of [BinNodeId, _Rest] -> local_node; [OtherBinNodeId, _Rest] -> - OtherNodeId = binary_to_integer(OtherBinNodeId), - case ejabberd_node_id:node_id_to_name(OtherNodeId) of + case mongoose_start_node_id:node_id_to_name(OtherBinNodeId) of {ok, NodeName} -> {remote_node, NodeName}; {error, Reason} -> diff --git a/src/ejabberd_node_id.erl b/src/ejabberd_node_id.erl deleted file mode 100644 index 0f0d84c80e6..00000000000 --- a/src/ejabberd_node_id.erl +++ /dev/null @@ -1,70 +0,0 @@ -%%% @doc Allocates unique ids for each node. --module(ejabberd_node_id). --export([start/0, node_id/0, node_id_to_name/1]). - - --include("mongoose.hrl"). --include("jlib.hrl"). - --type nodeid() :: non_neg_integer(). --record(node, {name :: atom(), - id :: nodeid() - }). - -start() -> - mnesia:create_table(node, - [{ram_copies, [node()]}, - {type, set}, - {attributes, record_info(fields, node)}]), - mnesia:add_table_copy(node, node(), ram_copies), - mnesia:add_table_index(node, id), - register_node(node()), - ok. - --spec register_node(atom()) -> 'ok'. -register_node(NodeName) -> - {atomic, _} = mnesia:transaction(fun() -> - case mnesia:read(node, NodeName) of - [] -> - mnesia:write(#node{name = NodeName, id = next_node_id()}); - [_] -> ok - end - end), - ok. - -%% @doc Return an integer node ID. --spec node_id() -> {ok, nodeid()}. -node_id() -> - %% Save result into the process's memory space. - case get(node_id) of - undefined -> - {ok, NodeId} = select_node_id(node()), - put(node_id, NodeId), - {ok, NodeId}; - NodeId -> - {ok, NodeId} - end. - -node_id_to_name(ID) -> - case mnesia:dirty_index_read(node, ID, #node.id) of - [] -> - {error, unknown_id}; - [#node{name = Name}] -> - {ok, Name} - end. - --spec next_node_id() -> nodeid(). -next_node_id() -> - max_node_id() + 1. - --spec max_node_id() -> nodeid(). 
-max_node_id() -> - mnesia:foldl(fun(#node{id=Id}, Max) -> max(Id, Max) end, 0, node). - --spec select_node_id(NodeName :: atom() - ) -> {'error', 'not_found'} | {'ok', nodeid()}. -select_node_id(NodeName) -> - case mnesia:dirty_read(node, NodeName) of - [#node{id=Id}] -> {ok, Id}; - [] -> {error, not_found} - end. diff --git a/src/ejabberd_router.erl b/src/ejabberd_router.erl index 031f2a9dbf5..7a881412808 100644 --- a/src/ejabberd_router.erl +++ b/src/ejabberd_router.erl @@ -31,55 +31,20 @@ -export([route/3, route/4, route_error/4, - route_error_reply/4, - is_component_dirty/1, - dirty_get_all_components/1, - register_components/2, - register_components/3, - register_components/4, - register_component/2, - register_component/3, - register_component/4, - lookup_component/1, - lookup_component/2, - unregister_component/1, - unregister_component/2, - unregister_components/1, - unregister_components/2 - ]). + route_error_reply/4]). -export([start_link/0]). --export([routes_cleanup_on_nodedown/3]). %% gen_server callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). -%% debug exports for tests --export([update_tables/0]). - --ignore_xref([register_component/2, register_component/3, register_component/4, - register_components/2, register_components/3, route_error/4, start_link/0, - unregister_component/1, unregister_component/2, unregister_components/2, - unregister_routes/1, update_tables/0]). +-ignore_xref([route_error/4, start_link/0]). -include("mongoose.hrl"). -include("jlib.hrl"). --include("external_component.hrl"). -record(state, {}). --type domain() :: binary(). - --type external_component() :: #external_component{domain :: domain(), - handler :: mongoose_packet_handler:t(), - is_hidden :: boolean()}. - -% Not simple boolean() because is probably going to support third value in the future: only_hidden. -% Besides, it increases readability. --type return_hidden() :: only_public | all. 
- --export_type([return_hidden/0]). - %%==================================================================== %% API %%==================================================================== @@ -164,191 +129,13 @@ route_error_reply(From, To, Acc, Error) -> {Acc1, ErrorReply} = jlib:make_error_reply(Acc, Error), route_error(From, To, Acc1, ErrorReply). - --spec register_components([Domain :: domain()], - Handler :: mongoose_packet_handler:t()) -> ok | {error, any()}. -register_components(Domains, Handler) -> - register_components(Domains, node(), Handler). - --spec register_components([Domain :: domain()], - Node :: node(), - Handler :: mongoose_packet_handler:t()) -> ok | {error, any()}. -register_components(Domains, Node, Handler) -> - register_components(Domains, Node, Handler, false). - --spec register_components([Domain :: domain()], - Node :: node(), - Handler :: mongoose_packet_handler:t(), - AreHidden :: boolean()) -> ok | {error, any()}. -register_components(Domains, Node, Handler, AreHidden) -> - LDomains = [{jid:nameprep(Domain), Domain} || Domain <- Domains], - F = fun() -> - [do_register_component(LDomain, Handler, Node, AreHidden) || LDomain <- LDomains], - ok - end, - case mnesia:transaction(F) of - {atomic, ok} -> ok; - {aborted, Reason} -> {error, Reason} - end. - -%% @doc -%% components are registered in two places: external_components table as local components -%% and external_components_global as globals. Registration should be done by register_component/1 -%% or register_components/1, which registers them for current node; the arity 2 funcs are -%% here for testing. --spec register_component(Domain :: domain(), - Handler :: mongoose_packet_handler:t()) -> ok | {error, any()}. -register_component(Domain, Handler) -> - register_component(Domain, node(), Handler). - --spec register_component(Domain :: domain(), - Node :: node(), - Handler :: mongoose_packet_handler:t()) -> ok | {error, any()}. 
-register_component(Domain, Node, Handler) -> - register_component(Domain, Node, Handler, false). - --spec register_component(Domain :: domain(), - Node :: node(), - Handler :: mongoose_packet_handler:t(), - IsHidden :: boolean()) -> ok | {error, any()}. -register_component(Domain, Node, Handler, IsHidden) -> - register_components([Domain], Node, Handler, IsHidden). - -do_register_component({error, Domain}, _Handler, _Node, _IsHidden) -> - error({invalid_domain, Domain}); -do_register_component({LDomain, _}, Handler, Node, IsHidden) -> - case check_component(LDomain, Node) of - ok -> - ComponentGlobal = #external_component{domain = LDomain, handler = Handler, - node = Node, is_hidden = IsHidden}, - mnesia:write(external_component_global, ComponentGlobal, write), - NDomain = {LDomain, Node}, - Component = #external_component{domain = NDomain, handler = Handler, - node = Node, is_hidden = IsHidden}, - mnesia:write(Component), - mongoose_hooks:register_subhost(LDomain, IsHidden); - _ -> mnesia:abort(route_already_exists) - end. - -%% @doc Check if the component/route is already registered somewhere; ok means it is not, so we are -%% ok to proceed, anything else means the domain/node pair is already serviced. -%% true and false are there because that's how orelse works. --spec check_component(binary(), Node :: node()) -> ok | error. -check_component(LDomain, Node) -> - case check_dynamic_domains(LDomain) - orelse check_component_route(LDomain) - orelse check_component_local(LDomain, Node) - orelse check_component_global(LDomain, Node) of - true -> error; - false -> ok - end. - -check_dynamic_domains(LDomain)-> - {error, not_found} =/= mongoose_domain_api:get_host_type(LDomain). - -%% check that route for this domain is not already registered -check_component_route(LDomain) -> - no_route =/= mongoose_router:lookup_route(LDomain). 
- -%% check that there is no local component for domain:node pair -check_component_local(LDomain, Node) -> - NDomain = {LDomain, Node}, - [] =/= mnesia:read(external_component, NDomain). - -%% check that there is no component registered globally for this node -check_component_global(LDomain, Node) -> - undefined =/= get_global_component(LDomain, Node). - -%% Find a component registered globally for this node (internal use) -get_global_component([], _) -> - undefined; -get_global_component([Comp|Tail], Node) -> - case Comp of - #external_component{node = Node} -> - Comp; - _ -> - get_global_component(Tail, Node) - end; -get_global_component(LDomain, Node) -> - get_global_component(mnesia:read(external_component_global, LDomain), Node). - - --spec unregister_components([Domains :: domain()]) -> {atomic, ok}. -unregister_components(Domains) -> - unregister_components(Domains, node()). --spec unregister_components([Domains :: domain()], Node :: node()) -> {atomic, ok}. -unregister_components(Domains, Node) -> - LDomains = [{jid:nameprep(Domain), Domain} || Domain <- Domains], - F = fun() -> - [do_unregister_component(LDomain, Node) || LDomain <- LDomains], - ok - end, - {atomic, ok} = mnesia:transaction(F). - -do_unregister_component({error, Domain}, _Node) -> - error({invalid_domain, Domain}); -do_unregister_component({LDomain, _}, Node) -> - case get_global_component(LDomain, Node) of - undefined -> - ok; - Comp -> - ok = mnesia:delete_object(external_component_global, Comp, write) - end, - ok = mnesia:delete({external_component, {LDomain, Node}}), - mongoose_hooks:unregister_subhost(LDomain), - ok. - --spec unregister_component(Domain :: domain()) -> {atomic, ok}. -unregister_component(Domain) -> - unregister_components([Domain]). - --spec unregister_component(Domain :: domain(), Node :: node()) -> {atomic, ok}. -unregister_component(Domain, Node) -> - unregister_components([Domain], Node). 
- -%% @doc Returns a list of components registered for this domain by any node, -%% the choice is yours. --spec lookup_component(Domain :: jid:lserver()) -> [external_component()]. -lookup_component(Domain) -> - mnesia:dirty_read(external_component_global, Domain). - -%% @doc Returns a list of components registered for this domain at the given node. -%% (must be only one, or nothing) --spec lookup_component(Domain :: jid:lserver(), Node :: node()) -> [external_component()]. -lookup_component(Domain, Node) -> - mnesia:dirty_read(external_component, {Domain, Node}). - --spec dirty_get_all_components(return_hidden()) -> [jid:lserver()]. -dirty_get_all_components(all) -> - mnesia:dirty_all_keys(external_component_global); -dirty_get_all_components(only_public) -> - MatchNonHidden = {#external_component{ domain = '$1', is_hidden = false, _ = '_' }, [], ['$1']}, - mnesia:dirty_select(external_component_global, [MatchNonHidden]). - --spec is_component_dirty(jid:lserver()) -> boolean(). -is_component_dirty(Domain) -> - [] =/= lookup_component(Domain). - %%==================================================================== %% gen_server callbacks %%==================================================================== init([]) -> - update_tables(), - - %% add distributed service_component routes - mnesia:create_table(external_component, - [{attributes, record_info(fields, external_component)}, - {local_content, true}]), - mnesia:add_table_copy(external_component, node(), ram_copies), - mnesia:create_table(external_component_global, - [{attributes, record_info(fields, external_component)}, - {type, bag}, - {record_name, external_component}]), - mnesia:add_table_copy(external_component_global, node(), ram_copies), mongoose_metrics:ensure_metric(global, routingErrors, spiral), - gen_hook:add_handlers(hooks()), - + mongoose_component:start(), {ok, #state{}}. handle_call(_Request, _From, State) -> @@ -362,7 +149,7 @@ handle_info(_Info, State) -> {noreply, State}. 
terminate(_Reason, _State) -> - gen_hook:delete_handlers(hooks()), + mongoose_component:stop(), ok. code_change(_OldVsn, State, _Extra) -> @@ -371,9 +158,6 @@ code_change(_OldVsn, State, _Extra) -> %%-------------------------------------------------------------------- %%% Internal functions %%-------------------------------------------------------------------- --spec hooks() -> [gen_hook:hook_tuple()]. -hooks() -> - [{node_cleanup, global, fun ?MODULE:routes_cleanup_on_nodedown/3, #{}, 90}]. -spec route(From :: jid:jid(), To :: jid:jid(), @@ -407,32 +191,3 @@ route(OrigFrom, OrigTo, Acc0, OrigPacket, [M|Tail]) -> class => Class, reason => Reason, stacktrace => Stacktrace}), mongoose_acc:append(router, result, {error, {M, Reason}}, Acc0) end. - -update_tables() -> - case catch mnesia:table_info(external_component, attributes) of - [domain, handler, node] -> - mnesia:delete_table(external_component); - [domain, handler, node, is_hidden] -> - ok; - {'EXIT', _} -> - ok - end, - case catch mnesia:table_info(external_component_global, attributes) of - [domain, handler, node] -> - UpdateFun = fun({external_component, Domain, Handler, Node}) -> - {external_component, Domain, Handler, Node, false} - end, - mnesia:transform_table(external_component_global, UpdateFun, - [domain, handler, node, is_hidden]); - [domain, handler, node, is_hidden] -> - ok; - {'EXIT', _} -> - ok - end. - --spec routes_cleanup_on_nodedown(map(), map(), map()) -> {ok, map()}. -routes_cleanup_on_nodedown(Acc, #{node := Node}, _) -> - Entries = mnesia:dirty_match_object(external_component_global, - #external_component{node = Node, _ = '_'}), - [mnesia:dirty_delete_object(external_component_global, Entry) || Entry <- Entries], - {ok, maps:put(?MODULE, ok, Acc)}. diff --git a/src/ejabberd_s2s.erl b/src/ejabberd_s2s.erl index 8ff94cff6e9..1bc0c324676 100644 --- a/src/ejabberd_s2s.erl +++ b/src/ejabberd_s2s.erl @@ -26,27 +26,17 @@ -module(ejabberd_s2s). -author('alexey@process-one.net'). 
--xep([{xep, 185}, {version, "1.0"}]). - -behaviour(gen_server). -behaviour(xmpp_router). -%% API +%% API functions -export([start_link/0, filter/4, route/4, - have_connection/1, key/3, - get_connections_pids/1, try_register/1, - remove_connection/2, - find_connection/2, - dirty_get_connections/0, - allow_host/2, - domain_utf8_to_ascii/1, - timeout/0, - lookup_certfile/1 - ]). + get_s2s_out_pids/1, + remove_connection/2]). %% Hooks callbacks -export([node_cleanup/3]). @@ -55,41 +45,32 @@ -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). -%% ejabberd API --export([get_info_s2s_connections/1]). - --ignore_xref([dirty_get_connections/0, get_info_s2s_connections/1, have_connection/1, - incoming_s2s_number/0, outgoing_s2s_number/0, start_link/0]). +-ignore_xref([start_link/0]). -include("mongoose.hrl"). -include("jlib.hrl"). --include("ejabberd_commands.hrl"). - --define(DEFAULT_MAX_S2S_CONNECTIONS_NUMBER, 1). --define(DEFAULT_MAX_S2S_CONNECTIONS_NUMBER_PER_NODE, 1). - --type fromto() :: {'global' | jid:server(), jid:server()}. --record(s2s, { - fromto, - pid - }). --type s2s() :: #s2s{ - fromto :: fromto(), - pid :: pid() - }. --record(s2s_shared, { - host_type :: mongooseim:host_type(), - secret :: binary() - }). + +%% Pair of hosts {FromServer, ToServer}. +%% FromServer is the local server. +%% ToServer is the remote server. +%% Used in a lot of API and backend functions. +-type fromto() :: {jid:lserver(), jid:lserver()}. + +%% Pids for ejabberd_s2s_out servers +-type s2s_pids() :: [pid()]. + -record(state, {}). -%%==================================================================== -%% API -%%==================================================================== -%%-------------------------------------------------------------------- -%% Description: Starts the server -%%-------------------------------------------------------------------- --spec start_link() -> 'ignore' | {'error', _} | {'ok', pid()}. 
+-type base16_secret() :: binary(). +-type stream_id() :: binary(). +-type s2s_dialback_key() :: binary(). + +-export_type([fromto/0, s2s_pids/0, base16_secret/0, stream_id/0, s2s_dialback_key/0]). + +%% API functions + +%% Starts the server +-spec start_link() -> ignore | {error, _} | {ok, pid()}. start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). @@ -99,172 +80,66 @@ filter(From, To, Acc, Packet) -> route(From, To, Acc, Packet) -> do_route(From, To, Acc, Packet). --spec remove_connection(_, pid()) -> 'ok' | {'aborted', _} | {'atomic', _}. -remove_connection(FromTo, Pid) -> - case catch mnesia:dirty_match_object(s2s, #s2s{fromto = FromTo, - pid = Pid}) of - [#s2s{pid = Pid}] -> - F = fun() -> - mnesia:delete_object(#s2s{fromto = FromTo, - pid = Pid}) - end, - mnesia:transaction(F); - _ -> - ok - end. - -have_connection(FromTo) -> - case catch mnesia:dirty_read(s2s, FromTo) of - [_] -> - true; - _ -> - false - end. - --spec get_connections_pids(_) -> ['undefined' | pid()]. -get_connections_pids(FromTo) -> - case catch mnesia:dirty_read(s2s, FromTo) of - L when is_list(L) -> - [Connection#s2s.pid || Connection <- L]; - _ -> - [] - end. - --spec try_register(fromto()) -> boolean(). +%% Called by ejabberd_s2s_out process. +-spec try_register(fromto()) -> IsRegistered :: boolean(). 
try_register(FromTo) -> - MaxS2SConnectionsNumber = max_s2s_connections_number(FromTo), - MaxS2SConnectionsNumberPerNode = - max_s2s_connections_number_per_node(FromTo), - F = fun() -> - L = mnesia:read({s2s, FromTo}), - NeededConnections = needed_connections_number( - L, MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode), - case NeededConnections > 0 of - true -> - mnesia:write(#s2s{fromto = FromTo, - pid = self()}), - true; - false -> - false - end - end, - case mnesia:transaction(F) of - {atomic, Res} -> - Res; + Pid = self(), + IsRegistered = call_try_register(Pid, FromTo), + case IsRegistered of + false -> + %% This usually happens when a ejabberd_s2s_out connection is established during dialback + %% procedure to check the key. + %% We still are fine, we just would not use that s2s connection to route + %% any stanzas to the remote server. + %% Could be a sign of abuse or a bug though, so use logging here. + ?LOG_INFO(#{what => s2s_register_failed, from_to => FromTo, pid => self()}); _ -> - false - end. + ok + end, + IsRegistered. -dirty_get_connections() -> - mnesia:dirty_all_keys(s2s). +-spec key(mongooseim:host_type(), fromto(), stream_id()) -> s2s_dialback_key(). +key(HostType, FromTo, StreamID) -> + {ok, Secret} = get_shared_secret(HostType), + mongoose_s2s_dialback:make_key(FromTo, StreamID, Secret). -%%==================================================================== %% Hooks callbacks -%%==================================================================== -spec node_cleanup(map(), map(), map()) -> {ok, map()}. node_cleanup(Acc, #{node := Node}, _) -> - F = fun() -> - Es = mnesia:select( - s2s, - [{#s2s{pid = '$1', _ = '_'}, - [{'==', {node, '$1'}, Node}], - ['$_']}]), - lists:foreach(fun(E) -> - mnesia:delete_object(E) - end, Es) - end, - Res = mnesia:async_dirty(F), + Res = call_node_cleanup(Node), {ok, maps:put(?MODULE, Res, Acc)}. --spec key(mongooseim:host_type(), {jid:lserver(), jid:lserver()}, binary()) -> - binary(). 
-key(HostType, {From, To}, StreamID) -> - Secret = get_shared_secret(HostType), - SecretHashed = base16:encode(crypto:hash(sha256, Secret)), - HMac = crypto:mac(hmac, sha256, SecretHashed, [From, " ", To, " ", StreamID]), - base16:encode(HMac). - -%%==================================================================== %% gen_server callbacks -%%==================================================================== - -%%-------------------------------------------------------------------- -%% Function: init(Args) -> {ok, State} | -%% {ok, State, Timeout} | -%% ignore | -%% {stop, Reason} -%% Description: Initiates the server -%%-------------------------------------------------------------------- + init([]) -> - mnesia:create_table(s2s, [{ram_copies, [node()]}, {type, bag}, - {attributes, record_info(fields, s2s)}]), - mnesia:add_table_copy(s2s, node(), ram_copies), - mnesia:create_table(s2s_shared, [{ram_copies, [node()]}, - {attributes, record_info(fields, s2s_shared)}]), - mnesia:add_table_copy(s2s_shared, node(), ram_copies), - {atomic, ok} = set_shared_secret(), - ejabberd_commands:register_commands(commands()), + internal_database_init(), + set_shared_secret(), + ejabberd_commands:register_commands(mongoose_s2s_lib:commands()), gen_hook:add_handlers(hooks()), {ok, #state{}}. -%%-------------------------------------------------------------------- -%% Function: %% handle_call(Request, From, State) -> {reply, Reply, State} | -%% {reply, Reply, State, Timeout} | -%% {noreply, State} | -%% {noreply, State, Timeout} | -%% {stop, Reason, Reply, State} | -%% {stop, Reason, State} -%% Description: Handling call messages -%%-------------------------------------------------------------------- handle_call(Request, From, State) -> ?UNEXPECTED_CALL(Request, From), {reply, {error, unexpected_call}, State}. 
-%%-------------------------------------------------------------------- -%% Function: handle_cast(Msg, State) -> {noreply, State} | -%% {noreply, State, Timeout} | -%% {stop, Reason, State} -%% Description: Handling cast messages -%%-------------------------------------------------------------------- handle_cast(Msg, State) -> ?UNEXPECTED_CAST(Msg), {noreply, State}. -%%-------------------------------------------------------------------- -%% Function: handle_info(Info, State) -> {noreply, State} | -%% {noreply, State, Timeout} | -%% {stop, Reason, State} -%% Description: Handling all non call/cast messages -%%-------------------------------------------------------------------- - handle_info(Msg, State) -> ?UNEXPECTED_INFO(Msg), {noreply, State}. -%%-------------------------------------------------------------------- -%% Function: terminate(Reason, State) -> void() -%% Description: This function is called by a gen_server when it is about to -%% terminate. It should be the opposite of Module:init/1 and do any necessary -%% cleaning up. When it returns, the gen_server terminates with Reason. -%% The return value is ignored. -%%-------------------------------------------------------------------- terminate(_Reason, _State) -> gen_hook:delete_handlers(hooks()), - ejabberd_commands:unregister_commands(commands()), + ejabberd_commands:unregister_commands(mongoose_s2s_lib:commands()), ok. -%%-------------------------------------------------------------------- -%% Func: code_change(OldVsn, State, Extra) -> {ok, NewState} -%% Description: Convert process state when code is changed -%%-------------------------------------------------------------------- code_change(_OldVsn, State, _Extra) -> {ok, State}. -%%-------------------------------------------------------------------- %%% Internal functions -%%-------------------------------------------------------------------- -spec hooks() -> [gen_hook:hook_tuple()]. 
hooks() -> [{node_cleanup, global, fun ?MODULE:node_cleanup/3, #{}, 50}]. @@ -277,19 +152,15 @@ hooks() -> do_route(From, To, Acc, Packet) -> ?LOG_DEBUG(#{what => s2s_route, acc => Acc}), case find_connection(From, To) of - {atomic, Pid} when is_pid(Pid) -> + {ok, Pid} when is_pid(Pid) -> ?LOG_DEBUG(#{what => s2s_found_connection, text => <<"Send packet to s2s connection">>, s2s_pid => Pid, acc => Acc}), - #xmlel{attrs = Attrs} = Packet, - NewAttrs = jlib:replace_from_to_attrs(jid:to_binary(From), - jid:to_binary(To), - Attrs), - NewPacket = Packet#xmlel{attrs = NewAttrs}, + NewPacket = jlib:replace_from_to(From, To, Packet), Acc1 = mongoose_hooks:s2s_send_packet(Acc, From, To, Packet), send_element(Pid, Acc1, NewPacket), {done, Acc1}; - {aborted, _Reason} -> + {error, _Reason} -> case mongoose_acc:stanza_type(Acc) of <<"error">> -> {done, Acc}; @@ -304,313 +175,110 @@ do_route(From, To, Acc, Packet) -> end end. --spec find_connection(From :: jid:jid(), - To :: jid:jid()) -> {'aborted', _} | {'atomic', _}. +-spec send_element(pid(), mongoose_acc:t(), exml:element()) -> ok. +send_element(Pid, Acc, El) -> + Pid ! {send_element, Acc, El}, + ok. + +-spec find_connection(From :: jid:jid(), To :: jid:jid()) -> + {ok, pid()} | {error, not_allowed}. 
find_connection(From, To) -> - #jid{lserver = MyServer} = From, - #jid{lserver = Server} = To, - FromTo = {MyServer, Server}, - MaxS2SConnectionsNumber = max_s2s_connections_number(FromTo), - MaxS2SConnectionsNumberPerNode = - max_s2s_connections_number_per_node(FromTo), - ?LOG_DEBUG(#{what => s2s_find_connection, from_server => MyServer, to_server => Server}), - case catch mnesia:dirty_read(s2s, FromTo) of - {'EXIT', Reason} -> - {aborted, Reason}; + FromTo = mongoose_s2s_lib:make_from_to(From, To), + ?LOG_DEBUG(#{what => s2s_find_connection, from_to => FromTo}), + OldCons = get_s2s_out_pids(FromTo), + NewCons = ensure_enough_connections(FromTo, OldCons), + case NewCons of [] -> - %% We try to establish all the connections if the host is not a - %% service and if the s2s host is not blacklisted or - %% is in whitelist: - maybe_open_several_connections(From, To, MyServer, Server, FromTo, - MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode); - L when is_list(L) -> - maybe_open_missing_connections(From, MyServer, Server, FromTo, - MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode, L) + {error, not_allowed}; + [_|_] -> + {ok, mongoose_s2s_lib:choose_pid(From, NewCons)} end. -maybe_open_missing_connections(From, MyServer, Server, FromTo, - MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode, L) -> - NeededConnections = needed_connections_number( - L, MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode), +%% Opens more connections if needed and allowed. +%% Returns an updated list of connections. +-spec ensure_enough_connections(fromto(), s2s_pids()) -> s2s_pids(). +ensure_enough_connections(FromTo, OldCons) -> + NeededConnections = + mongoose_s2s_lib:needed_extra_connections_number_if_allowed(FromTo, OldCons), + %% Could be negative, if we have too many connections case NeededConnections > 0 of true -> - %% We establish the missing connections for this pair. 
- open_several_connections( - NeededConnections, MyServer, - Server, From, FromTo, - MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode); + open_new_connections(NeededConnections, FromTo), + %% Query for s2s pids one more time + get_s2s_out_pids(FromTo); false -> - %% We choose a connexion from the pool of opened ones. - {atomic, choose_connection(From, L)} + OldCons end. -maybe_open_several_connections(From, To, MyServer, Server, FromTo, - MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode) -> - %% We try to establish all the connections if the host is not a - %% service and if the s2s host is not blacklisted or - %% is in whitelist: - case not is_service(From, To) andalso allow_host(MyServer, Server) of - true -> - NeededConnections = needed_connections_number( - [], MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode), - open_several_connections( - NeededConnections, MyServer, - Server, From, FromTo, - MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode); - false -> - {aborted, error} - end. +-spec open_new_connections(N :: pos_integer(), FromTo :: fromto()) -> ok. +open_new_connections(N, FromTo) -> + [open_new_connection(FromTo) || _N <- lists:seq(1, N)], + ok. --spec choose_connection(From :: jid:jid(), - Connections :: [s2s()]) -> any(). -choose_connection(From, Connections) -> - choose_pid(From, [C#s2s.pid || C <- Connections]). - --spec choose_pid(From :: jid:jid(), Pids :: [pid()]) -> pid(). -choose_pid(From, Pids) -> - Pids1 = case [P || P <- Pids, node(P) == node()] of - [] -> Pids; - Ps -> Ps - end, - % Use sticky connections based on the JID of the sender - % (without the resource to ensure that a muc room always uses the same connection) - Pid = lists:nth(erlang:phash2(jid:to_bare(From), length(Pids1)) + 1, Pids1), - ?LOG_DEBUG(#{what => s2s_choose_pid, from => From, s2s_pid => Pid}), - Pid. 
- --spec open_several_connections(N :: pos_integer(), MyServer :: jid:server(), - Server :: jid:server(), From :: jid:jid(), FromTo :: fromto(), - MaxS2S :: pos_integer(), MaxS2SPerNode :: pos_integer()) - -> {'aborted', _} | {'atomic', _}. -open_several_connections(N, MyServer, Server, From, FromTo, - MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode) -> - ConnectionsResult = - [new_connection(MyServer, Server, From, FromTo, - MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode) - || _N <- lists:seq(1, N)], - case [PID || {atomic, PID} <- ConnectionsResult] of - [] -> - hd(ConnectionsResult); - PIDs -> - {atomic, choose_pid(From, PIDs)} - end. +-spec open_new_connection(FromTo :: fromto()) -> ok. +open_new_connection(FromTo) -> + %% Start a process, but do not connect to the server yet. + {ok, Pid} = ejabberd_s2s_out:start(FromTo, new), + %% Try to write the Pid into Mnesia/CETS + IsRegistered = call_try_register(Pid, FromTo), + maybe_start_connection(Pid, FromTo, IsRegistered), + ok. --spec new_connection(MyServer :: jid:server(), Server :: jid:server(), - From :: jid:jid(), FromTo :: fromto(), MaxS2S :: pos_integer(), - MaxS2SPerNode :: pos_integer()) -> {'aborted', _} | {'atomic', _}. 
-new_connection(MyServer, Server, From, FromTo = {FromServer, ToServer}, - MaxS2SConnectionsNumber, MaxS2SConnectionsNumberPerNode) -> - {ok, Pid} = ejabberd_s2s_out:start( - MyServer, Server, new), - F = fun() -> - L = mnesia:read({s2s, FromTo}), - NeededConnections = needed_connections_number( - L, MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode), - case NeededConnections > 0 of - true -> - mnesia:write(#s2s{fromto = FromTo, - pid = Pid}), - ?LOG_INFO(#{what => s2s_new_connection, - text => <<"New s2s connection started">>, - from_server => FromServer, - to_server => ToServer, - s2s_pid => Pid}), - Pid; - false -> - choose_connection(From, L) - end - end, - TRes = mnesia:transaction(F), - case TRes of - {atomic, Pid} -> - ejabberd_s2s_out:start_connection(Pid); - _ -> - ejabberd_s2s_out:stop_connection(Pid) - end, - TRes. - --spec max_s2s_connections_number(fromto()) -> pos_integer(). -max_s2s_connections_number({From, To}) -> - {ok, HostType} = mongoose_domain_api:get_host_type(From), - case acl:match_rule(HostType, max_s2s_connections, jid:make(<<"">>, To, <<"">>)) of - Max when is_integer(Max) -> Max; - _ -> ?DEFAULT_MAX_S2S_CONNECTIONS_NUMBER - end. +%% If registration is successful, create an actual network connection. +%% If not successful, remove the process. +-spec maybe_start_connection(Pid :: pid(), FromTo :: fromto(), IsRegistered :: boolean()) -> ok. +maybe_start_connection(Pid, FromTo, true) -> + ?LOG_INFO(#{what => s2s_new_connection, + text => <<"New s2s connection started">>, + from_to => FromTo, s2s_pid => Pid}), + ejabberd_s2s_out:start_connection(Pid); +maybe_start_connection(Pid, _FromTo, false) -> + ejabberd_s2s_out:stop_connection(Pid). + +-spec set_shared_secret() -> ok. +set_shared_secret() -> + [set_shared_secret(HostType) || HostType <- ?ALL_HOST_TYPES], + ok. --spec max_s2s_connections_number_per_node(fromto()) -> pos_integer(). 
-max_s2s_connections_number_per_node({From, To}) -> - {ok, HostType} = mongoose_domain_api:get_host_type(From), - case acl:match_rule(HostType, max_s2s_connections_per_node, jid:make(<<"">>, To, <<"">>)) of - Max when is_integer(Max) -> Max; - _ -> ?DEFAULT_MAX_S2S_CONNECTIONS_NUMBER_PER_NODE +%% Updates the secret across the cluster if needed +-spec set_shared_secret(mongooseim:host_type()) -> ok. +set_shared_secret(HostType) -> + case mongoose_s2s_lib:check_shared_secret(HostType, get_shared_secret(HostType)) of + {update, NewSecret} -> + register_secret(HostType, NewSecret); + ok -> + ok end. --spec needed_connections_number([any()], pos_integer(), pos_integer()) -> integer(). -needed_connections_number(Ls, MaxS2SConnectionsNumber, - MaxS2SConnectionsNumberPerNode) -> - LocalLs = [L || L <- Ls, node(L#s2s.pid) == node()], - lists:min([MaxS2SConnectionsNumber - length(Ls), - MaxS2SConnectionsNumberPerNode - length(LocalLs)]). - -%%-------------------------------------------------------------------- -%% Function: is_service(From, To) -> true | false -%% Description: Return true if the destination must be considered as a -%% service. -%% -------------------------------------------------------------------- --spec is_service(jid:jid(), jid:jid()) -> boolean(). -is_service(From, To) -> - LFromDomain = From#jid.lserver, - case mongoose_config:lookup_opt({route_subdomains, LFromDomain}) of - {ok, s2s} -> % bypass RFC 3920 10.3 - false; - {error, not_found} -> - Hosts = ?MYHOSTS, - P = fun(ParentDomain) -> lists:member(ParentDomain, Hosts) end, - lists:any(P, parent_domains(To#jid.lserver)) - end. +%% Backend logic functions --spec parent_domains(binary()) -> [binary(), ...]. -parent_domains(Domain) -> - parent_domains(Domain, [Domain]). +-spec internal_database_init() -> ok. +internal_database_init() -> + Backend = mongoose_config:get_opt(s2s_backend), + mongoose_s2s_backend:init(#{backend => Backend}). 
--spec parent_domains(binary(), [binary(), ...]) -> [binary(), ...]. -parent_domains(<<>>, Acc) -> - lists:reverse(Acc); -parent_domains(<<$., Rest/binary>>, Acc) -> - parent_domains(Rest, [Rest | Acc]); -parent_domains(<<_, Rest/binary>>, Acc) -> - parent_domains(Rest, Acc). +%% Get ejabberd_s2s_out pids +-spec get_s2s_out_pids(FromTo :: fromto()) -> s2s_pids(). +get_s2s_out_pids(FromTo) -> + mongoose_s2s_backend:get_s2s_out_pids(FromTo). --spec send_element(pid(), mongoose_acc:t(), exml:element()) -> - {'send_element', mongoose_acc:t(), exml:element()}. -send_element(Pid, Acc, El) -> - Pid ! {send_element, Acc, El}. - -timeout() -> - 600000. -%%-------------------------------------------------------------------- -%% Function: domain_utf8_to_ascii(Domain) -> binary() | false -%% Description: Converts a UTF-8 domain to ASCII (IDNA) -%% -------------------------------------------------------------------- --spec domain_utf8_to_ascii(binary() | string()) -> binary() | false. -domain_utf8_to_ascii(Domain) -> - case catch idna:utf8_to_ascii(Domain) of - {'EXIT', _} -> - false; - AsciiDomain -> - list_to_binary(AsciiDomain) - end. +%% Returns true if the connection is registered +-spec call_try_register(Pid :: pid(), FromTo :: fromto()) -> IsRegistered :: boolean(). +call_try_register(Pid, FromTo) -> + mongoose_s2s_backend:try_register(Pid, FromTo). -%%%---------------------------------------------------------------------- -%%% ejabberd commands - --spec commands() -> [ejabberd_commands:cmd(), ...]. 
-commands() -> - [ - #ejabberd_commands{name = incoming_s2s_number, - tags = [stats, s2s], - desc = "Number of incoming s2s connections on the node", - module = stats_api, function = incoming_s2s_number, - args = [], - result = {s2s_incoming, integer}}, - #ejabberd_commands{name = outgoing_s2s_number, - tags = [stats, s2s], - desc = "Number of outgoing s2s connections on the node", - module = stats_api, function = outgoing_s2s_number, - args = [], - result = {s2s_outgoing, integer}} - ]. - -%% Check if host is in blacklist or white list -allow_host(MyServer, S2SHost) -> - case mongoose_domain_api:get_host_type(MyServer) of - {error, not_found} -> - false; - {ok, HostType} -> - case mongoose_config:lookup_opt([{s2s, HostType}, host_policy, S2SHost]) of - {ok, allow} -> - true; - {ok, deny} -> - false; - {error, not_found} -> - mongoose_config:get_opt([{s2s, HostType}, default_policy]) =:= allow - andalso mongoose_hooks:s2s_allow_host(MyServer, S2SHost) =:= allow - end - end. +-spec call_node_cleanup(Node :: node()) -> ok. +call_node_cleanup(Node) -> + mongoose_s2s_backend:node_cleanup(Node). + +-spec remove_connection(fromto(), pid()) -> ok. +remove_connection(FromTo, Pid) -> + mongoose_s2s_backend:remove_connection(FromTo, Pid). -%% @doc Get information about S2S connections of the specified type. --spec get_info_s2s_connections('in' | 'out') -> [[{atom(), any()}, ...]]. -get_info_s2s_connections(Type) -> - ChildType = case Type of - in -> ejabberd_s2s_in_sup; - out -> ejabberd_s2s_out_sup - end, - Connections = supervisor:which_children(ChildType), - get_s2s_info(Connections, Type). - --type connstate() :: 'restarting' | 'undefined' | pid(). --type conn() :: { any(), connstate(), 'supervisor' | 'worker', 'dynamic' | [_] }. --spec get_s2s_info(Connections :: [conn()], - Type :: 'in' | 'out' - ) -> [[{any(), any()}, ...]]. % list of lists -get_s2s_info(Connections, Type)-> - complete_s2s_info(Connections, Type, []). 
- --spec complete_s2s_info(Connections :: [conn()], - Type :: 'in' | 'out', - Result :: [[{any(), any()}, ...]] % list of lists - ) -> [[{any(), any()}, ...]]. % list of lists -complete_s2s_info([], _, Result)-> - Result; -complete_s2s_info([Connection|T], Type, Result)-> - {_, PID, _, _}=Connection, - State = get_s2s_state(PID), - complete_s2s_info(T, Type, [State|Result]). - --spec get_s2s_state(connstate()) -> [{atom(), any()}, ...]. -get_s2s_state(S2sPid)-> - Infos = case gen_fsm_compat:sync_send_all_state_event(S2sPid, get_state_infos) of - {state_infos, Is} -> [{status, open} | Is]; - {noproc, _} -> [{status, closed}]; %% Connection closed - {badrpc, _} -> [{status, error}] - end, - [{s2s_pid, S2sPid} | Infos]. - --spec get_shared_secret(mongooseim:host_type()) -> binary(). +-spec get_shared_secret(mongooseim:host_type()) -> {ok, base16_secret()} | {error, not_found}. get_shared_secret(HostType) -> - [#s2s_shared{secret = Secret}] = ets:lookup(s2s_shared, HostType), - Secret. + mongoose_s2s_backend:get_shared_secret(HostType). --spec set_shared_secret() -> {atomic, ok} | {aborted, term()}. -set_shared_secret() -> - mnesia:transaction(fun() -> - [set_shared_secret_t(HostType) || HostType <- ?ALL_HOST_TYPES], - ok - end). - --spec set_shared_secret_t(mongooseim:host_type()) -> ok. -set_shared_secret_t(HostType) -> - Secret = case mongoose_config:lookup_opt([{s2s, HostType}, shared]) of - {ok, SecretFromConfig} -> - SecretFromConfig; - {error, not_found} -> - base16:encode(crypto:strong_rand_bytes(10)) - end, - mnesia:write(#s2s_shared{host_type = HostType, secret = Secret}). - --spec lookup_certfile(mongooseim:host_type()) -> {ok, string()} | {error, not_found}. -lookup_certfile(HostType) -> - case mongoose_config:lookup_opt({domain_certfile, HostType}) of - {ok, CertFile} -> - CertFile; - {error, not_found} -> - mongoose_config:lookup_opt([{s2s, HostType}, certfile]) - end. +-spec register_secret(mongooseim:host_type(), base16_secret()) -> ok. 
+register_secret(HostType, Secret) -> + mongoose_s2s_backend:register_secret(HostType, Secret). diff --git a/src/ejabberd_s2s_in.erl b/src/ejabberd_s2s_in.erl index e090b7e95c2..ba03fdcdc51 100644 --- a/src/ejabberd_s2s_in.erl +++ b/src/ejabberd_s2s_in.erl @@ -34,6 +34,7 @@ %% External exports -export([start/2, start_link/2, + send_validity_from_s2s_out/3, match_domain/2]). %% gen_fsm callbacks @@ -47,29 +48,45 @@ handle_info/3, terminate/3]). +-export_type([connection_info/0]). + -ignore_xref([match_domain/2, start/2, start_link/2, stream_established/2, wait_for_feature_request/2, wait_for_stream/2]). -include("mongoose.hrl"). -include("jlib.hrl"). --record(state, {socket, - streamid :: binary(), - shaper, +-record(state, {socket :: mongoose_transport:socket_data(), + streamid :: ejabberd_s2s:stream_id(), + shaper :: shaper:shaper(), tls = false :: boolean(), tls_enabled = false :: boolean(), tls_required = false :: boolean(), tls_cert_verify = false :: boolean(), tls_options :: mongoose_tls:options(), - server :: jid:server() | undefined, + server :: jid:lserver() | undefined, host_type :: mongooseim:host_type() | undefined, authenticated = false :: boolean(), - auth_domain :: binary() | undefined, - connections = dict:new(), + auth_domain :: jid:lserver() | undefined, + connections = #{} :: map(), timer :: reference() }). -type state() :: #state{}. +-type connection_info() :: + #{pid => pid(), + direction => in, + statename => statename(), + addr => inet:ip_address(), + port => inet:port_number(), + streamid => ejabberd_s2s:stream_id(), + tls => boolean(), + tls_enabled => boolean(), + tls_options => mongoose_tls:options(), + authenticated => boolean(), + shaper => shaper:shaper(), + domains => [jid:lserver()]}. + -type statename() :: 'stream_established' | 'wait_for_feature_request'. %% FSM handler return value -type fsm_return() :: {'stop', Reason :: 'normal', state()} @@ -83,9 +100,6 @@ -define(FSMOPTS, []). -endif. 
--define(SUPERVISOR_START, supervisor:start_child(ejabberd_s2s_in_sup, - [Socket, Opts])). - -define(STREAM_HEADER(Version), (<<"" " {error, _} | {ok, undefined | pid()} | {ok, undefined | pid(), _}. start(Socket, Opts) -> - ?SUPERVISOR_START. + supervisor:start_child(ejabberd_s2s_in_sup, [Socket, Opts]). -spec start_link(socket(), options()) -> ignore | {error, _} | {ok, pid()}. start_link(Socket, Opts) -> @@ -114,6 +128,11 @@ start_link(Socket, Opts) -> start_listener(Opts) -> mongoose_tcp_listener:start_listener(Opts). +-spec send_validity_from_s2s_out(pid(), boolean(), ejabberd_s2s:fromto()) -> ok. +send_validity_from_s2s_out(Pid, IsValid, FromTo) when is_boolean(IsValid) -> + Event = {validity_from_s2s_out, IsValid, FromTo}, + p1_fsm:send_event(Pid, Event). + %%%---------------------------------------------------------------------- %%% Callback functions from gen_fsm %%%---------------------------------------------------------------------- @@ -127,10 +146,10 @@ start_listener(Opts) -> %%---------------------------------------------------------------------- -spec init([socket() | options(), ...]) -> {ok, wait_for_stream, state()}. init([Socket, #{shaper := Shaper, tls := TLSOpts}]) -> - ?LOG_DEBUG(#{what => s2n_in_started, + ?LOG_DEBUG(#{what => s2s_in_started, text => <<"New incoming S2S connection">>, socket => Socket}), - Timer = erlang:start_timer(ejabberd_s2s:timeout(), self(), []), + Timer = erlang:start_timer(mongoose_s2s_lib:timeout(), self(), []), {ok, wait_for_stream, #state{socket = Socket, streamid = new_id(), @@ -147,14 +166,15 @@ init([Socket, #{shaper := Shaper, tls := TLSOpts}]) -> %%---------------------------------------------------------------------- -spec wait_for_stream(ejabberd:xml_stream_item(), state()) -> fsm_return(). 
-wait_for_stream({xmlstreamstart, _Name, Attrs}, StateData) -> +wait_for_stream({xmlstreamstart, _Name, Attrs} = Event, StateData) -> case maps:from_list(Attrs) of AttrMap = #{<<"xmlns">> := <<"jabber:server">>, <<"to">> := Server} -> case StateData#state.server of undefined -> case mongoose_domain_api:get_host_type(Server) of {error, not_found} -> - stream_start_error(StateData, mongoose_xmpp_errors:host_unknown()); + Info = #{location => ?LOCATION, last_event => Event}, + stream_start_error(StateData, Info, mongoose_xmpp_errors:host_unknown()); {ok, HostType} -> UseTLS = mongoose_config:get_opt([{s2s, HostType}, use_starttls]), {StartTLS, TLSRequired, TLSCertVerify} = get_tls_params(UseTLS), @@ -168,22 +188,30 @@ wait_for_stream({xmlstreamstart, _Name, Attrs}, StateData) -> start_stream(AttrMap, StateData); _Other -> Msg = <<"The 'to' attribute differs from the originally provided one">>, - stream_start_error(StateData, mongoose_xmpp_errors:host_unknown(?MYLANG, Msg)) + Info = #{location => ?LOCATION, last_event => Event, + expected_server => StateData#state.server, provided_server => Server}, + stream_start_error(StateData, Info, mongoose_xmpp_errors:host_unknown(?MYLANG, Msg)) end; #{<<"xmlns">> := <<"jabber:server">>} -> Msg = <<"The 'to' attribute is missing">>, - stream_start_error(StateData, mongoose_xmpp_errors:improper_addressing(?MYLANG, Msg)); + Info = #{location => ?LOCATION, last_event => Event}, + stream_start_error(StateData, Info, mongoose_xmpp_errors:improper_addressing(?MYLANG, Msg)); _ -> - stream_start_error(StateData, mongoose_xmpp_errors:invalid_namespace()) + Info = #{location => ?LOCATION, last_event => Event}, + stream_start_error(StateData, Info, mongoose_xmpp_errors:invalid_namespace()) end; -wait_for_stream({xmlstreamerror, _}, StateData) -> - stream_start_error(StateData, mongoose_xmpp_errors:xml_not_well_formed()); +wait_for_stream({xmlstreamerror, _} = Event, StateData) -> + Info = #{location => ?LOCATION, last_event => Event, + 
reason => s2s_in_wait_for_stream_error}, + stream_start_error(StateData, Info, mongoose_xmpp_errors:xml_not_well_formed()); wait_for_stream(timeout, StateData) -> + ?LOG_WARNING(#{what => s2s_in_wait_for_stream_timeout}), {stop, normal, StateData}; wait_for_stream(closed, StateData) -> + ?LOG_WARNING(#{what => s2s_in_wait_for_stream_closed}), {stop, normal, StateData}. -start_stream(#{<<"version">> := <<"1.0">>, <<"from">> := RemoteServer}, +start_stream(#{<<"version">> := <<"1.0">>, <<"from">> := RemoteServer} = Event, StateData = #state{tls = true, authenticated = false, server = Server, host_type = HostType}) -> SASL = case StateData#state.tls_enabled of @@ -196,19 +224,19 @@ start_stream(#{<<"version">> := <<"1.0">>, <<"from">> := RemoteServer}, StartTLS = get_tls_xmlel(StateData), case SASL of {error_cert_verif, CertError} -> - ?LOG_INFO(#{what => s2s_connection_closing, - text => <<"Closing s2s connection">>, - server => StateData#state.server, - remote_server => RemoteServer, - reason => cert_error, - cert_error => CertError}), - Res = stream_start_error(StateData, - mongoose_xmpp_errors:policy_violation(?MYLANG, CertError)), - %% FIXME: why do we want stop just one of the connections here? - {atomic, Pid} = ejabberd_s2s:find_connection(jid:make(<<>>, Server, <<>>), - jid:make(<<>>, RemoteServer, <<>>)), - ejabberd_s2s_out:stop_connection(Pid), - Res; + ?LOG_WARNING(#{what => s2s_connection_closing, + text => <<"Closing s2s connection">>, + server => StateData#state.server, + remote_server => RemoteServer, + reason => cert_error, + cert_error => CertError}), + Info = #{location => ?LOCATION, last_event => Event, reason => error_cert_verif}, + stream_start_error(StateData, Info, + mongoose_xmpp_errors:policy_violation(?MYLANG, CertError)); + %% We were stopping ejabberd_s2s_out connection in the older version of the code + %% from this location. 
But stopping outgoing connections just because a non-verified + %% incoming connection fails is an abuse risk (a hacker could connect with an invalid + %% certificate, it should not cause stopping ejabberd_s2s_out connections). _ -> send_text(StateData, ?STREAM_HEADER(<<" version='1.0'">>)), send_element(StateData, @@ -225,13 +253,15 @@ start_stream(#{<<"version">> := <<"1.0">>}, start_stream(#{<<"xmlns:db">> := <<"jabber:server:dialback">>}, StateData) -> send_text(StateData, ?STREAM_HEADER(<<>>)), {next_state, stream_established, StateData}; -start_stream(_, StateData) -> - stream_start_error(StateData, mongoose_xmpp_errors:invalid_xml()). +start_stream(Event, StateData) -> + Info = #{location => ?LOCATION, last_event => Event}, + stream_start_error(StateData, Info, mongoose_xmpp_errors:invalid_xml()). -stream_start_error(StateData, Error) -> +stream_start_error(StateData, Info, Error) -> send_text(StateData, ?STREAM_HEADER(<<>>)), send_element(StateData, Error), send_text(StateData, ?STREAM_TRAILER), + ?LOG_WARNING(Info#{what => s2s_in_stream_start_error, element => Error}), {stop, normal, StateData}. 
-spec wait_for_feature_request(ejabberd:xml_stream_item(), state() @@ -270,6 +300,7 @@ wait_for_feature_request({xmlstreamelement, El}, StateData) -> #xmlel{name = <<"failure">>, attrs = [{<<"xmlns">>, ?NS_SASL}], children = [#xmlel{name = <<"invalid-mechanism">>}]}), + ?LOG_WARNING(#{what => s2s_in_invalid_mechanism}), {stop, normal, StateData} end; _ -> @@ -277,16 +308,19 @@ wait_for_feature_request({xmlstreamelement, El}, StateData) -> end; wait_for_feature_request({xmlstreamend, _Name}, StateData) -> send_text(StateData, ?STREAM_TRAILER), + ?LOG_WARNING(#{what => s2s_in_got_stream_end_before_feature_request}), {stop, normal, StateData}; wait_for_feature_request({xmlstreamerror, _}, StateData) -> send_element(StateData, mongoose_xmpp_errors:xml_not_well_formed()), send_text(StateData, ?STREAM_TRAILER), + ?LOG_WARNING(#{what => s2s_in_got_stream_error_before_feature_request}), {stop, normal, StateData}; wait_for_feature_request(closed, StateData) -> + ?LOG_WARNING(#{what => s2s_in_got_closed_before_feature_request}), {stop, normal, StateData}. tls_options_with_certfile(#state{host_type = HostType, tls_options = TLSOptions}) -> - case ejabberd_s2s:lookup_certfile(HostType) of + case mongoose_s2s_lib:lookup_certfile(HostType) of {ok, CertFile} -> TLSOptions#{certfile => CertFile}; {error, not_found} -> TLSOptions end. @@ -294,231 +328,158 @@ tls_options_with_certfile(#state{host_type = HostType, tls_options = TLSOptions} -spec stream_established(ejabberd:xml_stream_item(), state()) -> fsm_return(). 
stream_established({xmlstreamelement, El}, StateData) -> cancel_timer(StateData#state.timer), - Timer = erlang:start_timer(ejabberd_s2s:timeout(), self(), []), - case is_key_packet(El) of - {key, To, From, Id, Key} -> + Timer = erlang:start_timer(mongoose_s2s_lib:timeout(), self(), []), + case mongoose_s2s_dialback:parse_key(El) of + %% Incoming dialback key, we have to verify it using ejabberd_s2s_out before + %% accepting any incoming stanzas + %% (we have to receive the `validity_from_s2s_out' event first). + {step_1, FromTo, StreamID, Key} = Parsed -> ?LOG_DEBUG(#{what => s2s_in_get_key, - to => To, from => From, message_id => Id, key => Key}), - LTo = jid:nameprep(To), - LFrom = jid:nameprep(From), + from_to => FromTo, stream_id => StreamID, key => Key}), %% Checks if the from domain is allowed and if the to %% domain is handled by this server: - case {ejabberd_s2s:allow_host(LTo, LFrom), - mongoose_router:is_registered_route(LTo) - orelse ejabberd_router:is_component_dirty(LTo)} of + case {mongoose_s2s_lib:allow_host(FromTo), is_local_host_known(FromTo)} of {true, true} -> - ejabberd_s2s_out:terminate_if_waiting_delay(LTo, LFrom), - ejabberd_s2s_out:start(LTo, LFrom, - {verify, self(), - Key, StateData#state.streamid}), - Conns = dict:store({LFrom, LTo}, wait_for_verification, - StateData#state.connections), - change_shaper(StateData, LTo, jid:make(<<>>, LFrom, <<>>)), + ejabberd_s2s_out:terminate_if_waiting_delay(FromTo), + StartType = {verify, self(), Key, StateData#state.streamid}, + %% Could we reuse an existing ejabberd_s2s_out connection + %% instead of making a new one? 
+                    ejabberd_s2s_out:start(FromTo, StartType),
+                    Conns = maps:put(FromTo, wait_for_verification,
+                                     StateData#state.connections),
+                    change_shaper(StateData, FromTo),
                     {next_state,
                      stream_established,
-                     StateData#state{connections = Conns,
-                                     timer = Timer}};
+                     StateData#state{connections = Conns, timer = Timer}};
                 {_, false} ->
                     send_element(StateData, mongoose_xmpp_errors:host_unknown()),
+                    ?LOG_WARNING(#{what => s2s_in_key_from_unknown_host, element => El,
+                                   parsed => Parsed, from_to => FromTo}),
                     {stop, normal, StateData};
                 {false, _} ->
                     send_element(StateData, mongoose_xmpp_errors:invalid_from()),
+                    ?LOG_WARNING(#{what => s2s_in_key_with_invalid_from, element => El}),
                     {stop, normal, StateData}
             end;
-        {verify, To, From, Id, Key} ->
+        %% Incoming dialback verification request
+        %% We have to check it using secrets and reply if it is valid or not
+        {step_2, FromTo, StreamID, Key} ->
             ?LOG_DEBUG(#{what => s2s_in_verify_key,
-                        to => To, from => From, message_id => Id, key => Key}),
-            LTo = jid:nameprep(To),
-            LFrom = jid:nameprep(From),
-            Type = case ejabberd_s2s:key(StateData#state.host_type, {LTo, LFrom}, Id) of
-                       Key -> <<"valid">>;
-                       _ -> <<"invalid">>
-                   end,
-            send_element(StateData,
-                         #xmlel{name = <<"db:verify">>,
-                                attrs = [{<<"from">>, To},
-                                         {<<"to">>, From},
-                                         {<<"id">>, Id},
-                                         {<<"type">>, Type}]}),
+                         from_to => FromTo, stream_id => StreamID, key => Key}),
+            IsValid = Key =:= ejabberd_s2s:key(StateData#state.host_type, FromTo, StreamID),
+            send_element(StateData, mongoose_s2s_dialback:step_3(FromTo, StreamID, IsValid)),
             {next_state, stream_established, StateData#state{timer = Timer}};
-        _ ->
-            NewEl = jlib:remove_attr(<<"xmlns">>, El),
-            #xmlel{attrs = Attrs} = NewEl,
-            FromS = xml:get_attr_s(<<"from">>, Attrs),
-            From = jid:from_binary(FromS),
-            ToS = xml:get_attr_s(<<"to">>, Attrs),
-            To = jid:from_binary(ToS),
-            case {From, To} of
-                {error, _} -> ok;
-                {_, error} -> ok;
-                _ -> route_incoming_stanza(From, To, NewEl, StateData)
-            end,
+        false ->
+            Res = 
parse_and_route_incoming_stanza(El, StateData), + handle_routing_result(Res, El, StateData), {next_state, stream_established, StateData#state{timer = Timer}} end; -stream_established({valid, From, To}, StateData) -> - send_element(StateData, - #xmlel{name = <<"db:result">>, - attrs = [{<<"from">>, To}, - {<<"to">>, From}, - {<<"type">>, <<"valid">>}]}), - LFrom = jid:nameprep(From), - LTo = jid:nameprep(To), - NSD = StateData#state{ - connections = dict:store({LFrom, LTo}, established, - StateData#state.connections)}, - {next_state, stream_established, NSD}; -stream_established({invalid, From, To}, StateData) -> - send_element(StateData, - #xmlel{name = <<"db:result">>, - attrs = [{<<"from">>, To}, - {<<"to">>, From}, - {<<"type">>, <<"invalid">>}]}), - LFrom = jid:nameprep(From), - LTo = jid:nameprep(To), - NSD = StateData#state{ - connections = dict:erase({LFrom, LTo}, - StateData#state.connections)}, - {next_state, stream_established, NSD}; +stream_established({validity_from_s2s_out, IsValid, FromTo}, StateData) -> + handle_validity_from_s2s_out(IsValid, FromTo, StateData); stream_established({xmlstreamend, _Name}, StateData) -> send_text(StateData, ?STREAM_TRAILER), {stop, normal, StateData}; stream_established({xmlstreamerror, _}, StateData) -> send_element(StateData, mongoose_xmpp_errors:xml_not_well_formed()), send_text(StateData, ?STREAM_TRAILER), + ?LOG_WARNING(#{what => s2s_in_stream_error, state_name => stream_established}), {stop, normal, StateData}; stream_established(timeout, StateData) -> {stop, normal, StateData}; stream_established(closed, StateData) -> {stop, normal, StateData}. --spec route_incoming_stanza(From :: jid:jid(), - To :: jid:jid(), +-spec handle_validity_from_s2s_out(boolean(), ejabberd_s2s:fromto(), #state{}) -> + {next_state, stream_established, #state{}}. 
+handle_validity_from_s2s_out(IsValid, FromTo, StateData) -> + send_element(StateData, mongoose_s2s_dialback:step_4(FromTo, IsValid)), + {next_state, stream_established, update_connections(IsValid, FromTo, StateData)}. + +update_connections(true, FromTo, StateData = #state{connections = Cons}) -> + StateData#state{connections = maps:put(FromTo, established, Cons)}; +update_connections(false, FromTo, StateData = #state{connections = Cons}) -> + StateData#state{connections = maps:remove(FromTo, Cons)}. + +handle_routing_result(ok, _El, _StateData) -> + ok; +handle_routing_result({error, Reason}, El, _StateData) -> + ?LOG_WARNING(#{what => s2s_in_route_failed, reason => Reason, element => El}). + +parse_and_route_incoming_stanza(El, StateData) -> + NewEl = jlib:remove_attr(<<"xmlns">>, El), + RemoteJid = jid:from_binary(exml_query:attr(El, <<"from">>, <<>>)), + LocalJid = jid:from_binary(exml_query:attr(El, <<"to">>, <<>>)), + case {RemoteJid, LocalJid, is_valid_stanza(NewEl)} of + {#jid{}, #jid{}, true} -> + route_incoming_stanza(RemoteJid, LocalJid, NewEl, StateData); + _ -> + {error, invalid_stanza} + end. + +-spec route_incoming_stanza(RemoteJid :: jid:jid(), + LocalJid :: jid:jid(), El :: exml:element(), - StateData :: state()) -> - mongoose_acc:t() | error. -route_incoming_stanza(From, To, El, StateData) -> - LFromS = From#jid.lserver, - LToS = To#jid.lserver, - #xmlel{name = Name} = El, + StateData :: state()) -> ok | {error, term()}. 
+route_incoming_stanza(RemoteJid, LocalJid, El, StateData) -> + LRemoteServer = RemoteJid#jid.lserver, + LLocalServer = LocalJid#jid.lserver, + FromTo = {LLocalServer, LRemoteServer}, Acc = mongoose_acc:new(#{ location => ?LOCATION, - lserver => LToS, + lserver => LLocalServer, element => El, - from_jid => From, - to_jid => To }), - case is_s2s_authenticated(LFromS, LToS, StateData) of + from_jid => RemoteJid, + to_jid => LocalJid }), + case is_s2s_authenticated_or_connected(FromTo, StateData) of true -> - route_stanza(Name, Acc); + route_stanza(Acc); false -> - case is_s2s_connected(LFromS, LToS, StateData) of - true -> - route_stanza(Name, Acc); - false -> - error - end + {error, not_allowed} end. -is_s2s_authenticated(_, _, #state{authenticated = false}) -> +is_s2s_authenticated_or_connected(FromTo, StateData) -> + is_s2s_authenticated(FromTo, StateData) orelse + is_s2s_connected(FromTo, StateData). + +-spec is_s2s_authenticated(ejabberd_s2s:fromto(), #state{}) -> boolean(). +is_s2s_authenticated(_FromTo, #state{authenticated = false}) -> false; -is_s2s_authenticated(LFrom, LTo, #state{auth_domain = LFrom}) -> - mongoose_router:is_registered_route(LTo) - orelse ejabberd_router:is_component_dirty(LTo); -is_s2s_authenticated(_, _, _) -> - false. +is_s2s_authenticated(FromTo, State) -> + same_auth_domain(FromTo, State) andalso is_local_host_known(FromTo). -is_s2s_connected(LFrom, LTo, StateData) -> - case dict:find({LFrom, LTo}, StateData#state.connections) of - {ok, established} -> - true; - _ -> - false - end. +-spec same_auth_domain(ejabberd_s2s:fromto(), #state{}) -> boolean(). +same_auth_domain({_, LRemoteServer}, #state{auth_domain = AuthDomain}) -> + LRemoteServer =:= AuthDomain. + +-spec is_s2s_connected(ejabberd_s2s:fromto(), #state{}) -> boolean(). +is_s2s_connected(FromTo, StateData) -> + established =:= maps:get(FromTo, StateData#state.connections, false). + +-spec is_valid_stanza(exml:element()) -> boolean(). 
+is_valid_stanza(#xmlel{name = Name}) -> + is_valid_stanza_name(Name). --spec route_stanza(binary(), mongoose_acc:t()) -> mongoose_acc:t(). -route_stanza(<<"iq">>, Acc) -> - route_stanza(Acc); -route_stanza(<<"message">>, Acc) -> - route_stanza(Acc); -route_stanza(<<"presence">>, Acc) -> - route_stanza(Acc); -route_stanza(_, _Acc) -> - error. - --spec route_stanza(mongoose_acc:t()) -> mongoose_acc:t(). +is_valid_stanza_name(<<"iq">>) -> true; +is_valid_stanza_name(<<"message">>) -> true; +is_valid_stanza_name(<<"presence">>) -> true; +is_valid_stanza_name(_) -> false. + +-spec route_stanza(mongoose_acc:t()) -> ok. route_stanza(Acc) -> From = mongoose_acc:from_jid(Acc), To = mongoose_acc:to_jid(Acc), Acc1 = mongoose_hooks:s2s_receive_packet(Acc), - ejabberd_router:route(From, To, Acc1). - -%%---------------------------------------------------------------------- -%% Func: StateName/3 -%% Returns: {next_state, NextStateName, NextStateData} | -%% {next_state, NextStateName, NextStateData, Timeout} | -%% {reply, Reply, NextStateName, NextStateData} | -%% {reply, Reply, NextStateName, NextStateData, Timeout} | -%% {stop, Reason, NewStateData} | -%% {stop, Reason, Reply, NewStateData} -%%---------------------------------------------------------------------- -%state_name(Event, From, StateData) -> -% Reply = ok, -% {reply, Reply, state_name, StateData}. + ejabberd_router:route(From, To, Acc1), + ok. -%%---------------------------------------------------------------------- -%% Func: handle_event/3 -%% Returns: {next_state, NextStateName, NextStateData} | -%% {next_state, NextStateName, NextStateData, Timeout} | -%% {stop, Reason, NewStateData} -%%---------------------------------------------------------------------- handle_event(_Event, StateName, StateData) -> {next_state, StateName, StateData}. 
-%%---------------------------------------------------------------------- -%% Func: handle_sync_event/4 -%% Returns: The associated StateData for this connection -%% {reply, Reply, NextStateName, NextStateData} -%% Reply = {state_infos, [{InfoName::atom(), InfoValue::any()] -%%---------------------------------------------------------------------- --spec handle_sync_event(any(), any(), statename(), state() - ) -> {'reply', 'ok' | {'state_infos', [any(), ...]}, atom(), state()}. -handle_sync_event(get_state_infos, _From, StateName, StateData) -> - {ok, {Addr, Port}} = mongoose_transport:peername(StateData#state.socket), - Domains = case StateData#state.authenticated of - true -> - [StateData#state.auth_domain]; - false -> - Connections = StateData#state.connections, - [D || {{D, _}, established} <- - dict:to_list(Connections)] - end, - Infos = [ - {direction, in}, - {statename, StateName}, - {addr, Addr}, - {port, Port}, - {streamid, StateData#state.streamid}, - {tls, StateData#state.tls}, - {tls_enabled, StateData#state.tls_enabled}, - {tls_options, StateData#state.tls_options}, - {authenticated, StateData#state.authenticated}, - {shaper, StateData#state.shaper}, - {domains, Domains} - ], - Reply = {state_infos, Infos}, - {reply, Reply, StateName, StateData}; - -%%---------------------------------------------------------------------- -%% Func: handle_sync_event/4 -%% Returns: {next_state, NextStateName, NextStateData} | -%% {next_state, NextStateName, NextStateData, Timeout} | -%% {reply, Reply, NextStateName, NextStateData} | -%% {reply, Reply, NextStateName, NextStateData, Timeout} | -%% {stop, Reason, NewStateData} | -%% {stop, Reason, Reply, NewStateData} -%%---------------------------------------------------------------------- +-spec handle_sync_event(any(), any(), statename(), state()) -> + {reply, ok | connection_info(), statename(), state()}. 
+handle_sync_event(get_state_info, _From, StateName, StateData) -> + {reply, handle_get_state_info(StateName, StateData), StateName, StateData}; handle_sync_event(_Event, _From, StateName, StateData) -> - Reply = ok, - {reply, Reply, StateName, StateData}. - + {reply, ok, StateName, StateData}. code_change(_OldVsn, StateName, StateData, _Extra) -> {ok, StateName, StateData}. @@ -542,8 +503,8 @@ handle_info(_, StateName, StateData) -> %% Returns: any %%---------------------------------------------------------------------- -spec terminate(any(), statename(), state()) -> 'ok'. -terminate(Reason, _StateName, StateData) -> - ?LOG_DEBUG(#{what => s2s_in_stopped, reason => Reason}), +terminate(Reason, StateName, StateData) -> + ?LOG_DEBUG(#{what => s2s_in_stopped, reason => Reason, state_name => StateName}), mongoose_transport:close(StateData#state.socket), ok. @@ -563,11 +524,13 @@ send_element(StateData, El) -> stream_features(HostType, Domain) -> mongoose_hooks:s2s_stream_features(HostType, Domain). --spec change_shaper(state(), jid:lserver(), jid:jid()) -> any(). -change_shaper(StateData, Host, JID) -> - {ok, HostType} = mongoose_domain_api:get_host_type(Host), +-spec change_shaper(state(), ejabberd_s2s:fromto()) -> ok. +change_shaper(StateData, {LLocalServer, LRemoteServer}) -> + {ok, HostType} = mongoose_domain_api:get_host_type(LLocalServer), + JID = jid:make(<<>>, LRemoteServer, <<>>), Shaper = acl:match_rule(HostType, StateData#state.shaper, JID), - mongoose_transport:change_shaper(StateData#state.socket, Shaper). + mongoose_transport:change_shaper(StateData#state.socket, Shaper), + ok. -spec new_id() -> binary(). @@ -585,27 +548,6 @@ cancel_timer(Timer) -> ok end. - --spec is_key_packet(exml:element()) -> 'false' | {'key', _, _, _, binary()} - | {'verify', _, _, _, binary()}. 
-is_key_packet(#xmlel{name = Name, attrs = Attrs, - children = Els}) when Name == <<"db:result">> -> - {key, - xml:get_attr_s(<<"to">>, Attrs), - xml:get_attr_s(<<"from">>, Attrs), - xml:get_attr_s(<<"id">>, Attrs), - xml:get_cdata(Els)}; -is_key_packet(#xmlel{name = Name, attrs = Attrs, - children = Els}) when Name == <<"db:verify">> -> - {verify, - xml:get_attr_s(<<"to">>, Attrs), - xml:get_attr_s(<<"from">>, Attrs), - xml:get_attr_s(<<"id">>, Attrs), - xml:get_cdata(Els)}; -is_key_packet(_) -> - false. - - -spec match_domain(binary(), binary()) -> boolean(). match_domain(Domain, Domain) -> true; @@ -660,7 +602,7 @@ check_sasl_tls_certveify(false, _) -> check_auth_domain(error, _) -> false; check_auth_domain(AuthDomain, {ok, Cert}) -> - case ejabberd_s2s:domain_utf8_to_ascii(AuthDomain) of + case mongoose_s2s_lib:domain_utf8_to_ascii(AuthDomain) of false -> false; PCAuthDomain -> @@ -688,6 +630,7 @@ handle_auth_res(_, _, StateData) -> #xmlel{name = <<"failure">>, attrs = [{<<"xmlns">>, ?NS_SASL}]}), send_text(StateData, ?STREAM_TRAILER), + ?LOG_WARNING(#{what => s2s_in_auth_failed}), {stop, normal, StateData}. @@ -711,3 +654,41 @@ get_tls_xmlel(#state{tls_enabled = false, tls_required = true}) -> [#xmlel{name = <<"starttls">>, attrs = [{<<"xmlns">>, ?NS_TLS}], children = [#xmlel{name = <<"required">>}]}]. + +-spec is_local_host_known(ejabberd_s2s:fromto()) -> boolean(). +is_local_host_known({LLocalServer, _}) -> + mongoose_router:is_registered_route(LLocalServer) + orelse mongoose_component:has_component(LLocalServer) + orelse is_known_domain(LLocalServer). + +is_known_domain(Domain) -> + case mongoose_domain_api:get_host_type(Domain) of + {ok, _HostType} -> + true; + _ -> + false + end. + +-spec handle_get_state_info(statename(), state()) -> connection_info(). 
+handle_get_state_info(StateName, StateData) -> + {ok, {Addr, Port}} = mongoose_transport:peername(StateData#state.socket), + Domains = case StateData#state.authenticated of + true -> + [StateData#state.auth_domain]; + false -> + Connections = StateData#state.connections, + [LRemoteServer || {{_, LRemoteServer}, established} <- + maps:to_list(Connections)] + end, + #{pid => self(), + direction => in, + statename => StateName, + addr => Addr, + port => Port, + streamid => StateData#state.streamid, + tls => StateData#state.tls, + tls_enabled => StateData#state.tls_enabled, + tls_options => StateData#state.tls_options, + authenticated => StateData#state.authenticated, + shaper => StateData#state.shaper, + domains => Domains}. diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index 9b8a538f457..0721449ce81 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -31,11 +31,11 @@ -xep([{xep, 220}, {version, "1.1.1"}]). %% External exports --export([start/3, - start_link/3, +-export([start/2, + start_link/2, start_connection/1, - terminate_if_waiting_delay/2, - stop_connection/1]). + stop_connection/1, + terminate_if_waiting_delay/1]). %% p1_fsm callbacks (same as gen_fsm) -export([init/1, @@ -55,35 +55,62 @@ print_state/1, code_change/4]). +-export_type([connection_info/0]). + -ignore_xref([open_socket/2, print_state/1, - reopen_socket/2, start_link/3, stream_established/2, + reopen_socket/2, start_link/2, stream_established/2, wait_before_retry/2, wait_for_auth_result/2, wait_for_features/2, wait_for_starttls_proceed/2, wait_for_stream/2, wait_for_stream/2, wait_for_validation/2]). +-type verify_requester() :: false | {S2SIn :: pid(), Key :: ejabberd_s2s:s2s_dialback_key(), SID :: ejabberd_s2s:stream_id()}. + -include("mongoose.hrl"). -include("jlib.hrl"). 
-record(state, {socket, - streamid, - remote_streamid = <<>>, - use_v10, + streamid :: ejabberd_s2s:stream_id() | undefined, + remote_streamid = <<>> :: ejabberd_s2s:stream_id(), + use_v10 :: boolean(), tls = false :: boolean(), tls_required = false :: boolean(), tls_enabled = false :: boolean(), tls_options :: mongoose_tls:options(), authenticated = false :: boolean(), - db_enabled = true :: boolean(), + dialback_enabled = true :: boolean(), try_auth = true :: boolean(), - myname, server, queue, + from_to :: ejabberd_s2s:fromto(), + myname :: jid:lserver(), + server :: jid:lserver(), + queue :: element_queue(), host_type :: mongooseim:host_type(), - delay_to_retry = undefined_delay, - new = false :: boolean(), - verify = false :: false | {pid(), Key :: binary(), SID :: binary()}, + delay_to_retry :: non_neg_integer() | undefined, + is_registered = false :: boolean(), + verify = false :: verify_requester(), timer :: reference() }). -type state() :: #state{}. +-type connection_info() :: + #{pid => pid(), + direction => out, + statename => statename(), + addr => unknown | inet:ip_address(), + port => unknown | inet:port_number(), + streamid => ejabberd_s2s:stream_id() | undefined, + use_v10 => boolean(), + tls => boolean(), + tls_required => boolean(), + tls_enabled => boolean(), + tls_options => mongoose_tls:options(), + authenticated => boolean(), + dialback_enabled => boolean(), + try_auth => boolean(), + myname => jid:lserver(), + server => jid:lserver(), + delay_to_retry => undefined | non_neg_integer(), + verify => verify_requester()}. + -type element_queue() :: queue:queue(#xmlel{}). -type statename() :: open_socket | wait_for_stream @@ -110,9 +137,6 @@ -define(FSMOPTS, []). -endif. --define(SUPERVISOR_START, supervisor:start_child(ejabberd_s2s_out_sup, - [From, Host, Type])). - -define(FSMTIMEOUT, 30000). %% We do not block on send anymore. 
@@ -147,21 +171,18 @@ %%%---------------------------------------------------------------------- %%% API %%%---------------------------------------------------------------------- --spec start(_, _, _) -> {'error', _} | {'ok', 'undefined' | pid()} | {'ok', 'undefined' | pid(), _}. -start(From, Host, Type) -> - ?SUPERVISOR_START. - +-spec start(ejabberd_s2s:fromto(), _) -> {'error', _} | {'ok', 'undefined' | pid()} | {'ok', 'undefined' | pid(), _}. +start(FromTo, Type) -> + supervisor:start_child(ejabberd_s2s_out_sup, [FromTo, Type]). --spec start_link(_, _, _) -> 'ignore' | {'error', _} | {'ok', pid()}. -start_link(From, Host, Type) -> - p1_fsm:start_link(ejabberd_s2s_out, [From, Host, Type], +-spec start_link(ejabberd_s2s:fromto(), _) -> 'ignore' | {'error', _} | {'ok', pid()}. +start_link(FromTo, Type) -> + p1_fsm:start_link(ejabberd_s2s_out, [FromTo, Type], fsm_limit_opts() ++ ?FSMOPTS). - start_connection(Pid) -> p1_fsm:send_event(Pid, init). - stop_connection(Pid) -> p1_fsm:send_event(Pid, closed). @@ -176,8 +197,8 @@ stop_connection(Pid) -> %% ignore | %% {stop, StopReason} %%---------------------------------------------------------------------- --spec init([any(), ...]) -> {'ok', 'open_socket', state()}. -init([From, Server, Type]) -> +-spec init(list()) -> {'ok', 'open_socket', state()}. 
+init([{From, Server} = FromTo, Type]) -> process_flag(trap_exit, true), ?LOG_DEBUG(#{what => s2s_out_started, text => <<"New outgoing s2s connection">>, @@ -192,23 +213,24 @@ init([From, Server, Type]) -> {true, true} end, UseV10 = TLS, - {New, Verify} = case Type of + {IsRegistered, Verify} = case Type of new -> {true, false}; {verify, Pid, Key, SID} -> start_connection(self()), {false, {Pid, Key, SID}} end, - Timer = erlang:start_timer(ejabberd_s2s:timeout(), self(), []), + Timer = erlang:start_timer(mongoose_s2s_lib:timeout(), self(), []), {ok, open_socket, #state{use_v10 = UseV10, tls = TLS, tls_required = TLSRequired, tls_options = tls_options(HostType), queue = queue:new(), + from_to = FromTo, myname = From, host_type = HostType, server = Server, - new = New, + is_registered = IsRegistered, verify = Verify, timer = Timer}}. @@ -220,14 +242,14 @@ init([From, Server, Type]) -> %%---------------------------------------------------------------------- -spec open_socket(_, state()) -> fsm_return(). 
open_socket(init, StateData = #state{host_type = HostType}) -> - log_s2s_out(StateData#state.new, + log_s2s_out(StateData#state.is_registered, StateData#state.myname, StateData#state.server, StateData#state.tls), ?LOG_DEBUG(#{what => s2s_open_socket, myname => StateData#state.myname, server => StateData#state.server, - new => StateData#state.new, + is_registered => StateData#state.is_registered, verify => StateData#state.verify}), AddrList = get_addr_list(HostType, StateData#state.server), case lists:foldl(fun(_, {ok, Socket}) -> @@ -295,23 +317,23 @@ wait_for_stream({xmlstreamstart, _Name, Attrs}, StateData0) -> xml:get_attr_s(<<"xmlns:db">>, Attrs), xml:get_attr_s(<<"version">>, Attrs) == <<"1.0">>} of {<<"jabber:server">>, <<"jabber:server:dialback">>, false} -> - send_db_request(StateData); + send_dialback_request(StateData); {<<"jabber:server">>, <<"jabber:server:dialback">>, true} when StateData#state.use_v10 -> {next_state, wait_for_features, StateData, ?FSMTIMEOUT}; %% Clause added to handle Tigase's workaround for an old ejabberd bug: {<<"jabber:server">>, <<"jabber:server:dialback">>, true} when not StateData#state.use_v10 -> - send_db_request(StateData); + send_dialback_request(StateData); {<<"jabber:server">>, <<"">>, true} when StateData#state.use_v10 -> - {next_state, wait_for_features, StateData#state{db_enabled = false}, ?FSMTIMEOUT}; + {next_state, wait_for_features, StateData#state{dialback_enabled = false}, ?FSMTIMEOUT}; {NSProvided, DB, _} -> send_element(StateData, mongoose_xmpp_errors:invalid_namespace()), ?LOG_INFO(#{what => s2s_out_closing, text => <<"Closing s2s connection: (invalid namespace)">>, namespace_provided => NSProvided, namespace_expected => <<"jabber:server">>, - xmlnsdb_provided => DB, + xmlns_dialback_provided => DB, all_attributes => Attrs, myname => StateData#state.myname, server => StateData#state.server}), {stop, normal, StateData} @@ -329,13 +351,28 @@ wait_for_stream(closed, StateData) -> -spec 
wait_for_validation(ejabberd:xml_stream_item(), state()) -> fsm_return(). -wait_for_validation({xmlstreamelement, El}, StateData) -> - case is_verify_res(El) of - {result, To, From, Id, Type} -> +wait_for_validation({xmlstreamelement, El}, StateData = #state{from_to = FromTo}) -> + case mongoose_s2s_dialback:parse_validity(El) of + {step_3, FromTo, StreamID, IsValid} -> + ?LOG_DEBUG(#{what => s2s_receive_verify, + from_to => FromTo, stream_id => StreamID, is_valid => IsValid}), + case StateData#state.verify of + false -> + %% This is unexpected condition. + %% We've received step_3 reply, but there is no matching outgoing connection. + %% We could close the connection here. + next_state(wait_for_validation, StateData); + {Pid, _Key, _SID} -> + ejabberd_s2s_in:send_validity_from_s2s_out(Pid, IsValid, FromTo), + next_state(wait_for_validation, StateData) + end; + {step_4, FromTo, StreamID, IsValid} -> ?LOG_DEBUG(#{what => s2s_receive_result, - from => From, to => To, message_id => Id, type => Type}), - case {Type, StateData#state.tls_enabled, StateData#state.tls_required} of - {<<"valid">>, Enabled, Required} when (Enabled==true) or (Required==false) -> + from_to => FromTo, stream_id => StreamID, is_valid => IsValid}), + #state{tls_enabled = Enabled, tls_required = Required} = StateData, + case IsValid of + true when (Enabled==true) or (Required==false) -> + %% Initiating server receives valid verification result from receiving server (Step 4) send_queue(StateData, StateData#state.queue), ?LOG_INFO(#{what => s2s_out_connected, text => <<"New outgoing s2s connection established">>, @@ -343,30 +380,14 @@ wait_for_validation({xmlstreamelement, El}, StateData) -> myname => StateData#state.myname, server => StateData#state.server}), {next_state, stream_established, StateData#state{queue = queue:new()}}; - {<<"valid">>, Enabled, Required} when (Enabled==false) and (Required==true) -> + true when (Enabled==false) and (Required==true) -> %% TODO: bounce packets 
?CLOSE_GENERIC(wait_for_validation, tls_required_but_unavailable, El, StateData); _ -> %% TODO: bounce packets ?CLOSE_GENERIC(wait_for_validation, invalid_dialback_key, El, StateData) end; - {verify, To, From, Id, Type} -> - ?LOG_DEBUG(#{what => s2s_receive_verify, - from => From, to => To, message_id => Id, type => Type}), - case StateData#state.verify of - false -> - NextState = wait_for_validation, - %% TODO: Should'nt we close the connection here ? - {next_state, NextState, StateData, - get_timeout_interval(NextState)}; - {Pid, _Key, _SID} -> - send_event(Type, Pid, StateData), - NextState = wait_for_validation, - {next_state, NextState, StateData, - get_timeout_interval(NextState)} - - end; - _ -> + false -> {next_state, wait_for_validation, StateData, ?FSMTIMEOUT*3} end; wait_for_validation({xmlstreamend, _Name}, StateData) -> @@ -453,10 +474,10 @@ wait_for_auth_result({xmlstreamelement, El}, StateData) -> #xmlel{name = <<"failure">>, attrs = Attrs} -> case xml:get_attr_s(<<"xmlns">>, Attrs) of ?NS_SASL -> - ?LOG_INFO(#{what => s2s_auth_failure, - text => <<"Received failure result in ejabberd_s2s_out. Restarting">>, - myname => StateData#state.myname, - server => StateData#state.server}), + ?LOG_WARNING(#{what => s2s_auth_failure, + text => <<"Received failure result in ejabberd_s2s_out. Restarting">>, + myname => StateData#state.myname, + server => StateData#state.server}), mongoose_transport:close(StateData#state.socket), {next_state, reopen_socket, StateData#state{socket = undefined}, ?FSMTIMEOUT}; @@ -540,21 +561,23 @@ wait_before_retry(_Event, StateData) -> {next_state, wait_before_retry, StateData, ?FSMTIMEOUT}. -spec stream_established(ejabberd:xml_stream_item(), state()) -> fsm_return(). 
-stream_established({xmlstreamelement, El}, StateData) -> +stream_established({xmlstreamelement, El}, StateData = #state{from_to = FromTo}) -> ?LOG_DEBUG(#{what => s2s_out_stream_established, exml_packet => El, myname => StateData#state.myname, server => StateData#state.server}), - case is_verify_res(El) of - {verify, VTo, VFrom, VId, VType} -> + case mongoose_s2s_dialback:parse_validity(El) of + {step_3, FromTo, StreamID, IsValid} -> ?LOG_DEBUG(#{what => s2s_recv_verify, - to => VTo, from => VFrom, message_id => VId, type => VType, + from_to => FromTo, stream_id => StreamID, is_valid => IsValid, myname => StateData#state.myname, server => StateData#state.server}), case StateData#state.verify of {VPid, _VKey, _SID} -> - send_event(VType, VPid, StateData); + ejabberd_s2s_in:send_validity_from_s2s_out(VPid, IsValid, FromTo); _ -> ok end; - _ -> + {step_4, _FromTo, _StreamID, _IsValid} -> + ok; + false -> ok end, {next_state, stream_established, StateData}; @@ -590,55 +613,12 @@ stream_established(closed, StateData) -> %% {stop, Reason, NewStateData} %%---------------------------------------------------------------------- handle_event(_Event, StateName, StateData) -> - {next_state, StateName, StateData, get_timeout_interval(StateName)}. 
- -%%---------------------------------------------------------------------- -%% Func: handle_sync_event/4 -%% Returns: The associated StateData for this connection -%% {reply, Reply, NextStateName, NextStateData} -%% Reply = {state_infos, [{InfoName::atom(), InfoValue::any()] -%%---------------------------------------------------------------------- -handle_sync_event(get_state_infos, _From, StateName, StateData) -> - {Addr, Port} = try mongoose_transport:peername(StateData#state.socket) of - {ok, {A, P}} -> {A, P} - catch - _:_ -> - {unknown, unknown} - end, - Infos = [ - {direction, out}, - {statename, StateName}, - {addr, Addr}, - {port, Port}, - {streamid, StateData#state.streamid}, - {use_v10, StateData#state.use_v10}, - {tls, StateData#state.tls}, - {tls_required, StateData#state.tls_required}, - {tls_enabled, StateData#state.tls_enabled}, - {tls_options, StateData#state.tls_options}, - {authenticated, StateData#state.authenticated}, - {db_enabled, StateData#state.db_enabled}, - {try_auth, StateData#state.try_auth}, - {myname, StateData#state.myname}, - {server, StateData#state.server}, - {delay_to_retry, StateData#state.delay_to_retry}, - {verify, StateData#state.verify} - ], - Reply = {state_infos, Infos}, - {reply, Reply, StateName, StateData}; + next_state(StateName, StateData). 
-%%---------------------------------------------------------------------- -%% Func: handle_sync_event/4 -%% Returns: {next_state, NextStateName, NextStateData} | -%% {next_state, NextStateName, NextStateData, Timeout} | -%% {reply, Reply, NextStateName, NextStateData} | -%% {reply, Reply, NextStateName, NextStateData, Timeout} | -%% {stop, Reason, NewStateData} | -%% {stop, Reason, Reply, NewStateData} -%%---------------------------------------------------------------------- +handle_sync_event(get_state_info, _From, StateName, StateData) -> + {reply, handle_get_state_info(StateName, StateData), StateName, StateData}; handle_sync_event(_Event, _From, StateName, StateData) -> - Reply = ok, - {reply, Reply, StateName, StateData, get_timeout_interval(StateName)}. + {reply, ok, StateName, StateData, get_timeout_interval(StateName)}. code_change(_OldVsn, StateName, StateData, _Extra) -> @@ -654,7 +634,7 @@ handle_info({send_element, Acc, El}, StateName, StateData) -> case StateName of stream_established -> cancel_timer(StateData#state.timer), - Timer = erlang:start_timer(ejabberd_s2s:timeout(), self(), []), + Timer = erlang:start_timer(mongoose_s2s_lib:timeout(), self(), []), send_element(StateData, El), {next_state, StateName, StateData#state{timer = Timer}}; %% In this state we bounce all message: We are waiting before @@ -664,8 +644,7 @@ handle_info({send_element, Acc, El}, StateName, StateData) -> {next_state, StateName, StateData}; _ -> Q = queue:in({Acc, El}, StateData#state.queue), - {next_state, StateName, StateData#state{queue = Q}, - get_timeout_interval(StateName)} + next_state(StateName, StateData#state{queue = Q}) end; handle_info({timeout, Timer, _}, wait_before_retry, #state{timer = Timer} = StateData) -> @@ -680,9 +659,9 @@ handle_info({timeout, Timer, _}, StateName, handle_info(terminate_if_waiting_before_retry, wait_before_retry, StateData) -> ?CLOSE_GENERIC(wait_before_retry, terminate_if_waiting_before_retry, StateData); 
handle_info(terminate_if_waiting_before_retry, StateName, StateData) -> - {next_state, StateName, StateData, get_timeout_interval(StateName)}; + next_state(StateName, StateData); handle_info(_, StateName, StateData) -> - {next_state, StateName, StateData, get_timeout_interval(StateName)}. + next_state(StateName, StateData). %%---------------------------------------------------------------------- %% Func: terminate/3 @@ -693,7 +672,7 @@ terminate(Reason, StateName, StateData) -> ?LOG_DEBUG(#{what => s2s_out_closed, text => <<"ejabberd_s2s_out terminated">>, reason => Reason, state_name => StateName, myname => StateData#state.myname, server => StateData#state.server}), - case StateData#state.new of + case StateData#state.is_registered of false -> ok; true -> @@ -703,6 +682,15 @@ terminate(Reason, StateName, StateData) -> E = mongoose_xmpp_errors:remote_server_not_found(<<"en">>, <<"Bounced by s2s">>), %% bounce queue manage by process and Erlang message queue bounce_queue(StateData#state.queue, E), + case queue:is_empty(StateData#state.queue) of + true -> + ok; + false -> + ?LOG_WARNING(#{what => s2s_terminate_non_empty, + state_name => StateName, reason => Reason, + queue => lists:sublist(queue:to_list(StateData#state.queue), 10), + authenticated => StateData#state.authenticated}) + end, bounce_messages(E), case StateData#state.socket of undefined -> @@ -802,72 +790,44 @@ bounce_messages(Error) -> end. --spec send_db_request(state()) -> fsm_return(). -send_db_request(StateData) -> - Server = StateData#state.server, - New = case StateData#state.new of +-spec send_dialback_request(state()) -> fsm_return(). 
+send_dialback_request(StateData) -> + IsRegistered = case StateData#state.is_registered of false -> - ejabberd_s2s:try_register( - {StateData#state.myname, Server}); + ejabberd_s2s:try_register(StateData#state.from_to); true -> true end, - NewStateData = StateData#state{new = New}, + NewStateData = StateData#state{is_registered = IsRegistered}, try - case New of + case IsRegistered of false -> + %% Still not registered in the s2s table as an outgoing connection ok; true -> Key1 = ejabberd_s2s:key( StateData#state.host_type, - {StateData#state.myname, Server}, + StateData#state.from_to, StateData#state.remote_streamid), - send_element(StateData, - #xmlel{name = <<"db:result">>, - attrs = [{<<"from">>, StateData#state.myname}, - {<<"to">>, Server}], - children = [#xmlcdata{content = Key1}]}) + %% Initiating server sends dialback key + send_element(StateData, mongoose_s2s_dialback:step_1(StateData#state.from_to, Key1)) end, case StateData#state.verify of false -> ok; {_Pid, Key2, SID} -> - send_element(StateData, - #xmlel{name = <<"db:verify">>, - attrs = [{<<"from">>, StateData#state.myname}, - {<<"to">>, StateData#state.server}, - {<<"id">>, SID}], - children = [#xmlcdata{content = Key2}]}) + %% Receiving server sends verification request + send_element(StateData, mongoose_s2s_dialback:step_2(StateData#state.from_to, Key2, SID)) end, {next_state, wait_for_validation, NewStateData, ?FSMTIMEOUT*6} catch Class:Reason:Stacktrace -> - ?LOG_ERROR(#{what => s2s_out_send_db_request_failed, + ?LOG_ERROR(#{what => s2s_out_send_dialback_request_failed, class => Class, reason => Reason, stacktrace => Stacktrace, myname => StateData#state.myname, server => StateData#state.server}), {stop, normal, NewStateData} end. - --spec is_verify_res(exml:element()) -> 'false' | {'result', _, _, _, _} | {'verify', _, _, _, _}. 
-is_verify_res(#xmlel{name = Name, - attrs = Attrs}) when Name == <<"db:result">> -> - {result, - xml:get_attr_s(<<"to">>, Attrs), - xml:get_attr_s(<<"from">>, Attrs), - xml:get_attr_s(<<"id">>, Attrs), - xml:get_attr_s(<<"type">>, Attrs)}; -is_verify_res(#xmlel{name = Name, - attrs = Attrs}) when Name == <<"db:verify">> -> - {verify, - xml:get_attr_s(<<"to">>, Attrs), - xml:get_attr_s(<<"from">>, Attrs), - xml:get_attr_s(<<"id">>, Attrs), - xml:get_attr_s(<<"type">>, Attrs)}; -is_verify_res(_) -> - false. - - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% SRV support @@ -875,7 +835,7 @@ is_verify_res(_) -> -spec lookup_services(mongooseim:host_type(), jid:lserver()) -> [addr()]. lookup_services(HostType, Server) -> - case ejabberd_s2s:domain_utf8_to_ascii(Server) of + case mongoose_s2s_lib:domain_utf8_to_ascii(Server) of false -> []; ASCIIAddr -> do_lookup_services(HostType, ASCIIAddr) end. @@ -997,6 +957,10 @@ log_s2s_out(_, Myname, Server, Tls) -> text => <<"Trying to open s2s connection">>, myname => Myname, server => Server, tls => Tls}). +next_state(StateName, StateData) -> + {next_state, StateName, StateData, + get_timeout_interval(StateName)}. + %% @doc Calculate timeout depending on which state we are in: %% Can return integer > 0 | infinity -spec get_timeout_interval(statename()) -> 'infinity' | non_neg_integer(). 
@@ -1023,7 +987,7 @@ wait_before_reconnect(StateData) -> bounce_messages(E), cancel_timer(StateData#state.timer), Delay = case StateData#state.delay_to_retry of - undefined_delay -> + undefined -> %% The initial delay is random between 1 and 15 seconds %% Return a random integer between 1000 and 15000 MicroSecs = erlang:system_time(microsecond), @@ -1047,9 +1011,9 @@ get_max_retry_delay(HostType) -> %% @doc Terminate s2s_out connections that are in state wait_before_retry -terminate_if_waiting_delay(From, To) -> - FromTo = {From, To}, - Pids = ejabberd_s2s:get_connections_pids(FromTo), +-spec terminate_if_waiting_delay(ejabberd_s2s:fromto()) -> ok. +terminate_if_waiting_delay(FromTo) -> + Pids = ejabberd_s2s:get_s2s_out_pids(FromTo), lists:foreach( fun(Pid) -> Pid ! terminate_if_waiting_before_retry @@ -1092,17 +1056,6 @@ get_predefined_port(HostType, _Addr) -> outgoing_s2s_port(HostType). addr_type(Addr) when tuple_size(Addr) =:= 4 -> inet; addr_type(Addr) when tuple_size(Addr) =:= 8 -> inet6. -send_event(<<"valid">>, Pid, StateData) -> - p1_fsm:send_event( - Pid, {valid, - StateData#state.server, - StateData#state.myname}); -send_event(_, Pid, StateData) -> - p1_fsm:send_event( - Pid, {invalid, - StateData#state.server, - StateData#state.myname}). - get_acc_with_new_sext(?NS_SASL, Els1, {_SEXT, STLS, STLSReq}) -> NewSEXT = lists:any( @@ -1130,7 +1083,7 @@ get_acc_with_new_tls(_, _, Acc) -> tls_options(HostType) -> Ciphers = mongoose_config:get_opt([{s2s, HostType}, ciphers]), Options = #{verify_mode => peer, ciphers => Ciphers}, - case ejabberd_s2s:lookup_certfile(HostType) of + case mongoose_s2s_lib:lookup_certfile(HostType) of {ok, CertFile} -> Options#{certfile => CertFile}; {error, not_found} -> Options end. 
@@ -1149,8 +1102,7 @@ handle_parsed_features({false, false, _, StateData = #state{authenticated = true myname => StateData#state.myname, server => StateData#state.server}), {next_state, stream_established, StateData#state{queue = queue:new()}}; -handle_parsed_features({true, _, _, StateData = #state{try_auth = true, new = New}}) when - New /= false -> +handle_parsed_features({true, _, _, StateData = #state{try_auth = true, is_registered = true}}) -> send_element(StateData, #xmlel{name = <<"auth">>, attrs = [{<<"xmlns">>, ?NS_SASL}, @@ -1173,8 +1125,8 @@ handle_parsed_features({_, _, true, StateData = #state{tls = false}}) -> {next_state, reopen_socket, StateData#state{socket = undefined, use_v10 = false}, ?FSMTIMEOUT}; -handle_parsed_features({_, _, _, StateData = #state{db_enabled = true}}) -> - send_db_request(StateData); +handle_parsed_features({_, _, _, StateData = #state{dialback_enabled = true}}) -> + send_dialback_request(StateData); handle_parsed_features({_, _, _, StateData}) -> ?LOG_DEBUG(#{what => s2s_out_restarted, myname => StateData#state.myname, server => StateData#state.server}), @@ -1182,3 +1134,30 @@ handle_parsed_features({_, _, _, StateData}) -> mongoose_transport:close(StateData#state.socket), {next_state, reopen_socket, StateData#state{socket = undefined, use_v10 = false}, ?FSMTIMEOUT}. 
+ +handle_get_state_info(StateName, StateData) -> + {Addr, Port} = get_peername(StateData#state.socket), + #{pid => self(), + direction => out, + statename => StateName, + addr => Addr, + port => Port, + streamid => StateData#state.streamid, + use_v10 => StateData#state.use_v10, + tls => StateData#state.tls, + tls_required => StateData#state.tls_required, + tls_enabled => StateData#state.tls_enabled, + tls_options => StateData#state.tls_options, + authenticated => StateData#state.authenticated, + dialback_enabled => StateData#state.dialback_enabled, + try_auth => StateData#state.try_auth, + myname => StateData#state.myname, + server => StateData#state.server, + delay_to_retry => StateData#state.delay_to_retry, + verify => StateData#state.verify}. + +get_peername(undefined) -> + {unknown, unknown}; +get_peername(Socket) -> + {ok, {Addr, Port}} = mongoose_transport:peername(Socket), + {Addr, Port}. diff --git a/src/ejabberd_service.erl b/src/ejabberd_service.erl index ccdf635e08c..fff2af1c59f 100644 --- a/src/ejabberd_service.erl +++ b/src/ejabberd_service.erl @@ -71,7 +71,8 @@ hidden_components = false :: boolean(), conflict_behaviour :: conflict_behaviour(), access, - check_from + check_from, + components = [] :: [mongoose_component:external_component()] }). -type state() :: #state{}. @@ -450,9 +451,9 @@ try_register_routes(StateData) -> try_register_routes(StateData, Retries) -> case register_routes(StateData) of - ok -> + {ok, Components} -> send_element(StateData, #xmlel{name = <<"handshake">>}), - {next_state, stream_established, StateData}; + {next_state, stream_established, StateData#state{components = Components}}; {error, Reason} -> RoutesInfo = lookup_routes(StateData), ConflictBehaviour = StateData#state.conflict_behaviour, @@ -474,7 +475,8 @@ routes_info_to_pids(RoutesInfo) -> mongoose_packet_handler:module(H) =:= ?MODULE]. 
handle_registration_conflict(kick_old, RoutesInfo, StateData, Retries) when Retries > 0 -> - Pids = routes_info_to_pids(RoutesInfo), + %% see lookup_routes + Pids = lists:usort(routes_info_to_pids(RoutesInfo)), Results = lists:map(fun stop_process/1, Pids), AllOk = lists:all(fun(Result) -> Result =:= ok end, Results), case AllOk of @@ -497,18 +499,18 @@ do_disconnect_on_conflict(StateData) -> lookup_routes(StateData) -> Routes = get_routes(StateData), - [{Route, ejabberd_router:lookup_component(Route)} || Route <- Routes]. + %% Lookup for all pids for the route (both local and global) + [{Route, mongoose_component:lookup_component(Route)} || Route <- Routes]. -spec register_routes(state()) -> any(). register_routes(StateData = #state{hidden_components = AreHidden}) -> Routes = get_routes(StateData), Handler = mongoose_packet_handler:new(?MODULE, #{pid => self()}), - ejabberd_router:register_components(Routes, node(), Handler, AreHidden). + mongoose_component:register_components(Routes, node(), Handler, AreHidden). -spec unregister_routes(state()) -> any(). -unregister_routes(StateData) -> - Routes = get_routes(StateData), - ejabberd_router:unregister_components(Routes). +unregister_routes(#state{components = Components}) -> + mongoose_component:unregister_components(Components). get_routes(#state{host=Subdomain, is_subdomain=true}) -> Hosts = mongoose_config:get_opt(hosts), diff --git a/src/ejabberd_sm.erl b/src/ejabberd_sm.erl index c35f7c7abc0..38d6732586f 100644 --- a/src/ejabberd_sm.erl +++ b/src/ejabberd_sm.erl @@ -106,7 +106,7 @@ }. -type info() :: #{info_key() => any()}. --type backend() :: ejabberd_sm_mnesia | ejabberd_sm_redis. +-type backend() :: ejabberd_sm_mnesia | ejabberd_sm_redis | ejabberd_sm_cets. -type close_reason() :: resumed | normal | replaced. -type info_key() :: atom(). @@ -138,10 +138,6 @@ start() -> -spec start_link() -> 'ignore' | {'error', _} | {'ok', pid()}. 
start_link() -> - mongoose_metrics:ensure_metric(global, ?UNIQUE_COUNT_CACHE, gauge), - mongoose_metrics:create_probe_metric(global, totalSessionCount, mongoose_metrics_probe_total_sessions), - mongoose_metrics:create_probe_metric(global, uniqueSessionCount, mongoose_metrics_probe_unique_sessions), - mongoose_metrics:create_probe_metric(global, nodeSessionCount, mongoose_metrics_probe_node_sessions), gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). @@ -446,8 +442,16 @@ init([]) -> lists:foreach(fun(HostType) -> gen_hook:add_handlers(hooks(HostType)) end, ?ALL_HOST_TYPES), ejabberd_commands:register_commands(commands()), + %% Create metrics after backend has started, otherwise probe could have null value + create_metrics(), {ok, #state{}}. +create_metrics() -> + mongoose_metrics:ensure_metric(global, ?UNIQUE_COUNT_CACHE, gauge), + mongoose_metrics:create_probe_metric(global, totalSessionCount, mongoose_metrics_probe_total_sessions), + mongoose_metrics:create_probe_metric(global, uniqueSessionCount, mongoose_metrics_probe_unique_sessions), + mongoose_metrics:create_probe_metric(global, nodeSessionCount, mongoose_metrics_probe_node_sessions). + -spec hooks(binary()) -> [gen_hook:hook_tuple()]. hooks(HostType) -> [ diff --git a/src/ejabberd_sm_cets.erl b/src/ejabberd_sm_cets.erl new file mode 100644 index 00000000000..b602414797e --- /dev/null +++ b/src/ejabberd_sm_cets.erl @@ -0,0 +1,120 @@ +-module(ejabberd_sm_cets). + +-behavior(ejabberd_sm_backend). + +-include("mongoose.hrl"). +-include("session.hrl"). + +-export([init/1, + get_sessions/0, + get_sessions/1, + get_sessions/2, + get_sessions/3, + set_session/4, + delete_session/4, + cleanup/1, + total_count/0, + unique_count/0]). + +-define(TABLE, cets_session). + +-spec init(map()) -> any(). +init(_Opts) -> + cets:start(?TABLE, #{}), + cets_discovery:add_table(mongoose_cets_discovery, ?TABLE). + +-spec get_sessions() -> [ejabberd_sm:session()]. +get_sessions() -> + tuples_to_sessions(ets:tab2list(?TABLE)). 
+ +-spec get_sessions(jid:lserver()) -> [ejabberd_sm:session()]. +get_sessions(Server) -> + %% This is not a full table scan. From the ETS docs: + %% For ordered_set a partially bound key will limit the traversal to only + %% scan a subset of the table based on term order. + %% A partially bound key is either a list or a tuple with + %% a prefix that is fully bound. + R = {{Server, '_', '_', '_'}, '_', '_'}, + Xs = ets:match_object(?TABLE, R), + tuples_to_sessions(Xs). + +-spec get_sessions(jid:luser(), jid:lserver()) -> [ejabberd_sm:session()]. +get_sessions(User, Server) -> + R = {{Server, User, '_', '_'}, '_', '_'}, + Xs = ets:match_object(?TABLE, R), + tuples_to_sessions(Xs). + +-spec get_sessions(jid:luser(), jid:lserver(), jid:lresource()) -> + [ejabberd_sm:session()]. +get_sessions(User, Server, Resource) -> + R = {{Server, User, Resource, '_'}, '_', '_'}, + Xs = ets:match_object(?TABLE, R), + %% TODO these sessions should be deduplicated. + %% It is possible, that after merging two cets tables we could end up + %% with sessions from two nodes for the same full jid. + %% One of the sessions must be killed. + %% We can detect duplicates on the merging step or on reading (or both). + tuples_to_sessions(Xs). + +-spec set_session(User :: jid:luser(), + Server :: jid:lserver(), + Resource :: jid:lresource(), + Session :: ejabberd_sm:session()) -> ok | {error, term()}. +set_session(_User, _Server, _Resource, Session) -> + cets:insert(?TABLE, session_to_tuple(Session)). + +-spec delete_session(SID :: ejabberd_sm:sid(), + User :: jid:luser(), + Server :: jid:lserver(), + Resource :: jid:lresource()) -> ok. +delete_session(SID, User, Server, Resource) -> + cets:delete(?TABLE, make_key(User, Server, Resource, SID)). + +%% cleanup is called on each node in the cluster, when Node is down +-spec cleanup(atom()) -> any(). 
+cleanup(Node) -> + KeyPattern = {'_', '_', '_', {'_', '$1'}}, + Guard = {'==', {node, '$1'}, Node}, + R = {KeyPattern, '_', '_'}, + cets:sync(?TABLE), + %% This is a full table scan, but cleanup is rare. + Tuples = ets:select(?TABLE, [{R, [Guard], ['$_']}]), + lists:foreach(fun({_Key, _, _} = Tuple) -> + Session = tuple_to_session(Tuple), + ejabberd_sm:run_session_cleanup_hook(Session) + end, Tuples), + %% We don't need to replicate deletes + %% We remove the local content here + ets:select_delete(?TABLE, [{R, [Guard], [true]}]). + +-spec total_count() -> integer(). +total_count() -> + ets:info(?TABLE, size). + +%% Counts merged by US +-spec unique_count() -> integer(). +unique_count() -> + compute_unique(ets:first(?TABLE), 0). + +compute_unique('$end_of_table', Sum) -> + Sum; +compute_unique({S, U, _, _} = Key, Sum) -> + Key2 = ets:next(?TABLE, Key), + case Key2 of + {S, U, _, _} -> + compute_unique(Key2, Sum); + _ -> + compute_unique(Key2, Sum + 1) + end. + +session_to_tuple(#session{sid = SID, usr = {U, S, R}, priority = Prio, info = Info}) -> + {make_key(U, S, R, SID), Prio, Info}. + +make_key(User, Server, Resource, SID) -> + {Server, User, Resource, SID}. + +tuple_to_session({{S, U, R, SID}, Prio, Info}) -> + #session{sid = SID, usr = {U, S, R}, us = {U, S}, priority = Prio, info = Info}. + +tuples_to_sessions(Xs) -> + [tuple_to_session(X) || X <- Xs]. diff --git a/src/ejabberd_sup.erl b/src/ejabberd_sup.erl index af427fecbac..16c9d3aa7b8 100644 --- a/src/ejabberd_sup.erl +++ b/src/ejabberd_sup.erl @@ -29,7 +29,7 @@ -behaviour(supervisor). -export([start_link/0, init/1]). --export([start_child/1, stop_child/1]). +-export([start_child/1, start_child/2, stop_child/1]). -include("mongoose_logger.hrl"). 
@@ -156,12 +156,19 @@ init([]) -> PG = {pg, {pg, start_link, [mim_scope]}, - permanent, infinity, supervisor, [pg]}, + permanent, infinity, worker, [pg]}, + StartIdServer = + {mongoose_start_node_id, + {mongoose_start_node_id, start_link, []}, + permanent, infinity, worker, [mongoose_start_node_id]}, {ok, {{one_for_one, 10, 1}, - [PG, + [StartIdServer, + PG, Hooks, Cleaner, SMBackendSupervisor, + OutgoingPoolsSupervisor + ] ++ mongoose_cets_discovery:supervisor_specs() ++ [ Router, S2S, Local, @@ -170,7 +177,6 @@ init([]) -> S2SInSupervisor, S2SOutSupervisor, ServiceSupervisor, - OutgoingPoolsSupervisor, IQSupervisor, Listener, MucIQ, @@ -178,12 +184,18 @@ init([]) -> DomainSup]}}. start_child(ChildSpec) -> - case supervisor:start_child(ejabberd_sup, ChildSpec) of + start_child(ejabberd_sup, ChildSpec). + +%% This function handles error results from supervisor:start_child +%% It does some logging +start_child(SupName, ChildSpec) -> + case supervisor:start_child(SupName, ChildSpec) of {ok, Pid} -> {ok, Pid}; Other -> Stacktrace = element(2, erlang:process_info(self(), current_stacktrace)), ?LOG_ERROR(#{what => start_child_failed, spec => ChildSpec, + supervisor_name => SupName, reason => Other, stacktrace => Stacktrace}), erlang:error({start_child_failed, Other, ChildSpec}) end. diff --git a/src/global_distrib/mod_global_distrib_mapping.erl b/src/global_distrib/mod_global_distrib_mapping.erl index a380da345eb..3cbb7700956 100644 --- a/src/global_distrib/mod_global_distrib_mapping.erl +++ b/src/global_distrib/mod_global_distrib_mapping.erl @@ -133,7 +133,7 @@ hosts() -> %%-------------------------------------------------------------------- -spec start(mongooseim:host_type(), gen_mod:module_opts()) -> any(). 
-start(HostType, Opts = #{cache := CacheOpts}) -> +start(_HostType, Opts = #{cache := CacheOpts}) -> mod_global_distrib_mapping_backend:start(Opts#{backend => redis}), mongoose_metrics:ensure_metric(global, ?GLOBAL_DISTRIB_MAPPING_FETCH_TIME, histogram), @@ -153,7 +153,7 @@ start(HostType, Opts = #{cache := CacheOpts}) -> {max_size, MaxJids}]). -spec stop(mongooseim:host_type()) -> any(). -stop(HostType) -> +stop(_HostType) -> ets_cache:delete(?JID_TAB), ets_cache:delete(?DOMAIN_TAB), mod_global_distrib_mapping_backend:stop(). diff --git a/src/graphql/admin/mongoose_graphql_admin_query.erl b/src/graphql/admin/mongoose_graphql_admin_query.erl index faf446b2a2f..6a8eae5d231 100644 --- a/src/graphql/admin/mongoose_graphql_admin_query.erl +++ b/src/graphql/admin/mongoose_graphql_admin_query.erl @@ -21,6 +21,8 @@ execute(_Ctx, _Obj, <<"metric">>, _Args) -> {ok, metric}; execute(_Ctx, _Obj, <<"mnesia">>, _Args) -> {ok, mnesia}; +execute(_Ctx, _Obj, <<"cets">>, _Args) -> + {ok, cets}; execute(_Ctx, _Obj, <<"muc">>, _Args) -> {ok, muc}; execute(_Ctx, _Obj, <<"muc_light">>, _Args) -> diff --git a/src/graphql/admin/mongoose_graphql_cets_admin_query.erl b/src/graphql/admin/mongoose_graphql_cets_admin_query.erl new file mode 100644 index 00000000000..394dbcc4c28 --- /dev/null +++ b/src/graphql/admin/mongoose_graphql_cets_admin_query.erl @@ -0,0 +1,23 @@ +-module(mongoose_graphql_cets_admin_query). +-behaviour(mongoose_graphql). + +-export([execute/4]). + +-import(mongoose_graphql_helper, [make_error/2]). + +-ignore_xref([execute/4]). + +-include("../mongoose_graphql_types.hrl"). + +execute(Ctx, cets, <<"systemInfo">>, _) -> + try cets_discovery:info(mongoose_cets_discovery) of + Tables -> + {ok, lists:map(fun process_result/1, Tables)} + catch _Class:Reason -> + make_error({Reason, <<"Failed to get CETS tables info">>}, Ctx) + end. 
+ +process_result(#{memory := Memory, size := Size, nodes := Nodes, table := Tab}) -> + Nodes2 = [{ok, Node} || Node <- Nodes], + {ok, #{<<"memory">> => Memory, <<"size">> => Size, + <<"nodes">> => Nodes2, <<"tableName">> => Tab}}. diff --git a/src/graphql/mongoose_graphql.erl b/src/graphql/mongoose_graphql.erl index 6eaf02d69c9..cdff5cdd5ed 100644 --- a/src/graphql/mongoose_graphql.erl +++ b/src/graphql/mongoose_graphql.erl @@ -194,6 +194,7 @@ admin_mapping_rules() -> 'MUCLightAdminQuery' => mongoose_graphql_muc_light_admin_query, 'MnesiaAdminMutation' => mongoose_graphql_mnesia_admin_mutation, 'MnesiaAdminQuery' => mongoose_graphql_mnesia_admin_query, + 'CETSAdminQuery' => mongoose_graphql_cets_admin_query, 'OfflineAdminMutation' => mongoose_graphql_offline_admin_mutation, 'PrivateAdminMutation' => mongoose_graphql_private_admin_mutation, 'PrivateAdminQuery' => mongoose_graphql_private_admin_query, diff --git a/src/hooks/mongoose_hooks.erl b/src/hooks/mongoose_hooks.erl index f1f9719f871..db1f243763e 100644 --- a/src/hooks/mongoose_hooks.erl +++ b/src/hooks/mongoose_hooks.erl @@ -138,9 +138,10 @@ mod_global_distrib_unknown_recipient/2]). -export([remove_domain/2, - node_cleanup/1]). + node_cleanup/1, + node_cleanup_for_host_type/2]). --ignore_xref([node_cleanup/1, remove_domain/2]). +-ignore_xref([remove_domain/2]). -ignore_xref([mam_archive_sync/1, mam_muc_archive_sync/1]). %% Just a map, used by some hooks as a first argument. @@ -217,6 +218,11 @@ node_cleanup(Node) -> Params = #{node => Node}, run_global_hook(node_cleanup, #{}, Params). +-spec node_cleanup_for_host_type(HostType :: mongooseim:host_type(), Node :: node()) -> Acc :: map(). +node_cleanup_for_host_type(HostType, Node) -> + Params = #{node => Node}, + run_hook_for_host_type(node_cleanup_for_host_type, HostType, #{}, Params). + -spec failed_to_store_message(Acc) -> Result when Acc :: mongoose_acc:t(), Result :: mongoose_acc:t(). 
diff --git a/src/inbox/mod_inbox.erl b/src/inbox/mod_inbox.erl index be60c04469d..cc4735fe608 100644 --- a/src/inbox/mod_inbox.erl +++ b/src/inbox/mod_inbox.erl @@ -170,12 +170,9 @@ process_inbox_boxes(Config = #{boxes := Boxes}) -> %% Cleaner gen_server callbacks start_cleaner(HostType, #{bin_ttl := TTL, bin_clean_after := Interval}) -> - Name = gen_mod:get_module_proc(HostType, ?MODULE), WOpts = #{host_type => HostType, action => fun mod_inbox_api:flush_global_bin/2, opts => TTL, interval => Interval}, - MFA = {mongoose_collector, start_link, [Name, WOpts]}, - ChildSpec = {Name, MFA, permanent, 5000, worker, [?MODULE]}, - ejabberd_sup:start_child(ChildSpec). + mongoose_collector:start_common(?MODULE, HostType, WOpts). stop_cleaner(HostType) -> Name = gen_mod:get_module_proc(HostType, ?MODULE), diff --git a/src/jingle_sip/jingle_sip_callbacks.erl b/src/jingle_sip/jingle_sip_callbacks.erl index b4e06a5ecd2..73dec3b9f77 100644 --- a/src/jingle_sip/jingle_sip_callbacks.erl +++ b/src/jingle_sip/jingle_sip_callbacks.erl @@ -102,7 +102,7 @@ translate_and_deliver_invite(Req, FromJID, FromBinary, ToJID, ToBinary) -> JingleEl = jingle_sip_helper:jingle_element(CallID, <<"session-initiate">>, ContentEls ++ OtherEls), - ok = mod_jingle_sip_backend:set_incoming_request(CallID, ReqID, FromJID, ToJID, JingleEl), + ok = mod_jingle_sip_session:set_incoming_request(CallID, ReqID, FromJID, ToJID, JingleEl), ?LOG_INFO(#{what => sip_invite, text => <<"Got SIP INVITE from NkSIP">>, from_jid => FromBinary, to_jid => ToBinary, @@ -176,7 +176,7 @@ sip_bye(Req, _Call) -> from_jid => FromJID, to_jid => ToJID }), maybe_route_to_all_sessions(FromJID, ToJID, Acc, IQEl), - + ok = mod_jingle_sip_session:remove_session(CallID), {reply, ok}. 
sip_cancel(_InviteReq, Req, _Call) -> @@ -194,7 +194,7 @@ sip_cancel(_InviteReq, Req, _Call) -> from_jid => FromJID, to_jid => ToJID }), maybe_route_to_all_sessions(FromJID, ToJID, Acc, IQEl), - + ok = mod_jingle_sip_session:remove_session(CallID), {reply, ok}. sip_dialog_update(start, Dialog, Call) -> @@ -203,7 +203,7 @@ sip_dialog_update(start, Dialog, Call) -> case Transaction#trans.class of uas -> {ok, CallID} = nksip_dialog:call_id(Dialog), - mod_jingle_sip_backend:set_incoming_handle(CallID, DialogHandle); + mod_jingle_sip_session:set_incoming_handle(CallID, DialogHandle); _ -> ok @@ -247,7 +247,7 @@ invite_resp_callback({resp, 200, SIPMsg, _Call}) -> element => IQEl, from_jid => FromJID, to_jid => ToJID }), - ok = mod_jingle_sip_backend:set_outgoing_accepted(CallID), + ok = mod_jingle_sip_session:set_outgoing_accepted(CallID), maybe_route_to_all_sessions(FromJID, ToJID, Acc, IQEl), ok; invite_resp_callback({resp, 487, _SIPMsg, _Call}) -> @@ -286,6 +286,7 @@ invite_resp_callback({resp, ErrorCode, SIPMsg, _Call}) from_jid => FromJID, to_jid => ToJID }), maybe_route_to_all_sessions(FromJID, ToJID, Acc, IQEl), + ok = mod_jingle_sip_session:remove_session(CallID), ok; invite_resp_callback(Data) -> ?LOG_ERROR(#{what => sip_unknown_response, sip_data => Data}). 
@@ -303,7 +304,7 @@ send_ringing_session_info(SIPMsg, ErrorCode) -> dialog_id => DialogId, server_id => SrvId, from_jid => FromBinary, to_binary => ToBinary}), - mod_jingle_sip_backend:set_outgoing_handle(CallID, DialogHandle, FromJID, ToJID), + mod_jingle_sip_session:set_outgoing_handle(CallID, DialogHandle, FromJID, ToJID), RingingEl = #xmlel{name = <<"ringing">>, attrs = [{<<"xmlns">>, <<"urn:xmpp:jingle:apps:rtp:info:1">>}]}, diff --git a/src/jingle_sip/mod_jingle_sip.erl b/src/jingle_sip/mod_jingle_sip.erl index 2fb46a48832..7751caef50d 100644 --- a/src/jingle_sip/mod_jingle_sip.erl +++ b/src/jingle_sip/mod_jingle_sip.erl @@ -96,7 +96,9 @@ config_spec() -> validate = ip_address}, <<"transport">> => #option{type = string, validate = {enum, ["udp", "tcp"]}}, - <<"username_to_phone">> => #list{items = username_to_phone_spec()} + <<"username_to_phone">> => #list{items = username_to_phone_spec()}, + <<"backend">> => #option{type = atom, + validate = {module, mod_jingle_sip}} }, defaults = #{<<"proxy_host">> => "localhost", <<"proxy_port">> => 5060, @@ -104,7 +106,8 @@ config_spec() -> <<"local_host">> => "localhost", <<"sdp_origin">> => "127.0.0.1", <<"transport">> => "udp", - <<"username_to_phone">> => []} + <<"username_to_phone">> => [], + <<"backend">> => mnesia} }. 
username_to_phone_spec() -> @@ -206,7 +209,7 @@ resend_session_initiate(#iq{sub_el = Jingle} = IQ, Acc) -> From = mongoose_acc:from_jid(Acc), To = mongoose_acc:to_jid(Acc), SID = exml_query:attr(Jingle, <<"sid">>), - case mod_jingle_sip_backend:get_session_info(SID, From) of + case mod_jingle_sip_session:get_session_info(SID, From) of {ok, Session} -> maybe_resend_session_initiate(From, To, IQ, Acc, Session); _ -> @@ -247,7 +250,7 @@ translate_to_sip(<<"session-initiate">>, Jingle, Acc) -> %% Internal options async, {callback, fun jingle_sip_callbacks:invite_resp_callback/1}]), - Result = mod_jingle_sip_backend:set_outgoing_request(SID, Handle, FromJID, ToJID), + Result = mod_jingle_sip_session:set_outgoing_request(SID, Handle, FromJID, ToJID), {_, SrvId, DialogId, _CallId} = nksip_sipmsg:parse_handle(Handle), ?LOG_INFO(#{what => sip_session_start, text => <<"Start SIP session with set_outgoing_request call">>, @@ -258,7 +261,7 @@ translate_to_sip(<<"session-initiate">>, Jingle, Acc) -> translate_to_sip(<<"session-accept">>, Jingle, Acc) -> LServer = mongoose_acc:lserver(Acc), SID = exml_query:attr(Jingle, <<"sid">>), - case mod_jingle_sip_backend:get_incoming_request(SID, mongoose_acc:get(c2s, origin_jid, Acc)) of + case mod_jingle_sip_session:get_incoming_request(SID, mongoose_acc:get(c2s, origin_jid, Acc)) of {ok, ReqID} -> try_to_accept_session(ReqID, Jingle, Acc, LServer, SID); _ -> @@ -273,7 +276,7 @@ translate_to_sip(<<"source-update">> = Name, Jingle, Acc) -> translate_to_sip(<<"transport-info">>, Jingle, Acc) -> SID = exml_query:attr(Jingle, <<"sid">>), SDP = make_sdp_for_ice_candidate(Jingle), - case mod_jingle_sip_backend:get_outgoing_handle(SID, mongoose_acc:get(c2s, origin_jid, Acc)) of + case mod_jingle_sip_session:get_outgoing_handle(SID, mongoose_acc:get(c2s, origin_jid, Acc)) of {ok, undefined} -> ?LOG_ERROR(#{what => sip_missing_dialog, sid => SID, acc => Acc}), {error, item_not_found}; @@ -290,7 +293,7 @@ translate_to_sip(<<"session-terminate">>, 
Jingle, Acc) -> From = mongoose_acc:get(c2s, origin_jid, Acc), FromLUS = jid:to_lus(From), ToLUS = jid:to_lus(ToJID), - case mod_jingle_sip_backend:get_session_info(SID, From) of + case mod_jingle_sip_session:get_session_info(SID, From) of {ok, Session} -> try_to_terminate_the_session(FromLUS, ToLUS, Session); _ -> @@ -300,7 +303,7 @@ translate_to_sip(<<"session-terminate">>, Jingle, Acc) -> translate_source_change_to_sip(ActionName, Jingle, Acc) -> SID = exml_query:attr(Jingle, <<"sid">>), SDP = get_spd(ActionName, Jingle, Acc), - case mod_jingle_sip_backend:get_outgoing_handle(SID, mongoose_acc:get(c2s, origin_jid, Acc)) of + case mod_jingle_sip_session:get_outgoing_handle(SID, mongoose_acc:get(c2s, origin_jid, Acc)) of {ok, undefined} -> ?LOG_ERROR(#{what => sip_missing_dialod, sid => SID, acc => Acc}), {error, item_not_found}; @@ -342,7 +345,7 @@ try_to_accept_session(ReqID, Jingle, Acc, Server, SID) -> LocalHost = gen_mod:get_module_opt(Server, ?MODULE, local_host), case nksip_request_reply({ok, [{body, SDP}, {local_host, LocalHost}]}, ReqID) of ok -> - ok = mod_jingle_sip_backend:set_incoming_accepted(SID), + ok = mod_jingle_sip_session:set_incoming_accepted(SID), terminate_session_on_other_devices(SID, Acc), ok; Other -> diff --git a/src/jingle_sip/mod_jingle_sip_backend.erl b/src/jingle_sip/mod_jingle_sip_backend.erl index 06b9914670f..c1d32cea498 100644 --- a/src/jingle_sip/mod_jingle_sip_backend.erl +++ b/src/jingle_sip/mod_jingle_sip_backend.erl @@ -1,238 +1,54 @@ %% @doc Backend module for mod_jingle_sip -%% @author Michal Piotrowski -%% -%%============================================================================== -%% Copyright 2018 Erlang Solutions Ltd. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. 
-%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%============================================================================== -module(mod_jingle_sip_backend). --include("mongoose.hrl"). - --type call_id() :: binary(). --type incoming_request() :: {node(), binary()}. --type outgoing_handle() :: binary(). - -export([init/2]). --export([set_incoming_request/5]). --export([set_incoming_handle/2]). --export([set_outgoing_request/4]). --export([set_outgoing_handle/4]). --export([set_outgoing_accepted/1]). --export([set_incoming_accepted/1]). --export([get_incoming_request/2]). --export([get_outgoing_handle/2]). --export([get_session_info/2]). +-export([read_session/1]). +-export([write_new_session/2]). +-export([update_session/2]). -export([remove_session/1]). --ignore_xref([remove_session/1]). - --record(jingle_sip_session, {sid, - dialog, - state, - direction, - request, - node, - owner, - from, - to, - now, - meta}). - -init(_Host, _Opts) -> - mnesia:create_table(jingle_sip_session, - [{ram_copies, [node()]}, - {attributes, record_info(fields, jingle_sip_session)}]). - --spec set_incoming_request(CallID :: call_id(), ReqID :: binary(), - From :: jid:jid(), To :: jid:jid(), exml:element()) -> - ok | {error, any()}. -set_incoming_request(CallID, ReqID, From, To, JingleEl) -> - TFun = pa:bind(fun set_incoming_request_tr/5, CallID, ReqID, From, To, JingleEl), - run_transaction(TFun). 
- -set_incoming_request_tr(CallID, ReqID, From, To, JingleEl) -> - Owner = jid:to_lus(To), - case mnesia:wread({jingle_sip_session, CallID}) of - [_] -> - {error, sid_already_exists}; - _ -> - Meta = #{init_stanza => JingleEl}, - Session = #jingle_sip_session{sid = CallID, - request = ReqID, - dialog = undefined, - state = undefined, - direction = in, - node = node(), - from = jid:to_lus(From), - to = Owner, - owner = Owner, - now = os:system_time(microsecond), - meta = Meta}, - mnesia:write(Session) - end. - --spec set_outgoing_request(CallID :: call_id(), ReqID :: binary(), - From :: jid:jid(), To :: jid:jid()) -> - ok | {error, any()}. -set_outgoing_request(CallID, ReqID, From, To) -> - TFun = pa:bind(fun set_outgoing_request_tr/4, CallID, ReqID, From, To), - run_transaction(TFun). - -set_outgoing_request_tr(CallID, ReqID, From, To) -> - Owner = jid:to_lus(From), - case mnesia:wread({jingle_sip_session, CallID}) of - [_] -> - {error, sid_already_exists}; - _ -> - Session = #jingle_sip_session{sid = CallID, - request = ReqID, - dialog = undefined, - state = undefined, - direction = out, - node = node(), - from = Owner, - to = jid:to_lus(To), - owner = Owner, - now = os:system_time(microsecond), - meta = #{}}, - mnesia:write(Session) - end. - -run_transaction(TFun) -> - case mnesia:transaction(TFun) of - {atomic, Result} -> - Result; - {aborted, Reason} -> - {error, Reason} - end. - +-include("mongoose.hrl"). -set_incoming_handle(CallID, DialogHandle) -> - TFun = pa:bind(fun set_incoming_handle_tr/2, CallID, DialogHandle), - run_transaction(TFun). +-type call_id() :: mod_jingle_sip_session:call_id(). +-type session() :: mod_jingle_sip_session:session(). +-type update_fun() :: mod_jingle_sip_session:update_fun(). 
-set_incoming_handle_tr(CallID, DialogHandle) -> - case mnesia:wread({jingle_sip_session, CallID}) of - [#jingle_sip_session{dialog = undefined, direction = in} = Session] -> - Session2 = Session#jingle_sip_session{dialog = DialogHandle, - node = node()}, - mnesia:write(Session2); - [_] -> - {error, incoming_handle_exists}; - _ -> - {error, not_found} - end. +-define(MAIN_MODULE, mod_jingle_sip). -set_outgoing_handle(CallID, DialogHandle, From, To) -> - TFun = pa:bind(fun set_outgoing_handle_tr/4, CallID, DialogHandle, From, To), - run_transaction(TFun). +-callback init(mongooseim:host_type(), gen_mod:module_opts()) -> ok. -set_outgoing_handle_tr(CallID, DialogHandle, _From, _To) -> - case mnesia:wread({jingle_sip_session, CallID}) of - [#jingle_sip_session{dialog = undefined, direction = out} = Session] -> - Session2 = Session#jingle_sip_session{dialog = DialogHandle, - node = node()}, - mnesia:write(Session2); - [_] -> - {error, outgoing_handle_exists}; - _ -> - Session = #jingle_sip_session{sid = CallID, - dialog = DialogHandle, - node = node(), - direction = out, - state = ringing}, - mnesia:write(Session) - end. +-callback read_session(call_id()) -> [session()]. -set_incoming_accepted(CallID) -> - TFun = pa:bind(fun set_incoming_accepted_tr/1, CallID), - run_transaction(TFun). +-callback write_new_session(call_id(), session()) -> + ok | {error, conflict}. -set_incoming_accepted_tr(CallID) -> - case mnesia:wread({jingle_sip_session, CallID}) of - [#jingle_sip_session{direction = in, meta = Meta} = Session] -> - MetaWithoutInitStanza = maps:without([init_stanza], Meta), - Session2 = Session#jingle_sip_session{state = accepted, - meta = MetaWithoutInitStanza}, - mnesia:write(Session2); - _ -> - {error, not_found} - end. +-callback update_session(call_id(), update_fun()) -> ok | {error, _}. -set_outgoing_accepted(CallID) -> - TFun = pa:bind(fun set_outgoing_accepted_tr/1, CallID), - run_transaction(TFun). +-callback remove_session(call_id()) -> ok. 
-set_outgoing_accepted_tr(CallID) -> - case mnesia:wread({jingle_sip_session, CallID}) of - [#jingle_sip_session{direction = out} = Session] -> - Session2 = Session#jingle_sip_session{state = accepted}, - mnesia:write(Session2); - _ -> - {error, not_found} - end. +-spec init(mongooseim:host_type(), gen_mod:module_opts()) -> ok. +init(Host, Opts) -> + Args = [Host, Opts], + mongoose_backend:init(global, ?MAIN_MODULE, [], Opts), + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). --spec get_incoming_request(call_id(), jid:jid()) -> {ok, undefined | incoming_request()} | - {error, not_found}. -get_incoming_request(CallID, User) -> - UserUS = jid:to_lus(User), - case mnesia:dirty_read(jingle_sip_session, CallID) of - [#jingle_sip_session{request = ReqID, node = Node, owner = UserUS}] -> - {ok, {Node, ReqID}}; - _ -> - {error, not_found} - end. +-spec read_session(call_id()) -> [session()]. +read_session(CallID) -> + Args = [CallID], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). --spec get_outgoing_handle(call_id(), jid:jid()) -> {ok, undefined | outgoing_handle()} | - {error, not_found}. -get_outgoing_handle(SID, User) -> - UserUS = jid:to_lus(User), - case mnesia:dirty_read(jingle_sip_session, SID) of - [#jingle_sip_session{dialog = Handle, owner = UserUS}] -> - {ok, Handle}; - _ -> - {error, not_found} - end. +-spec write_new_session(call_id(), session()) -> + ok | {error, conflict}. +write_new_session(CallID, Session) -> + Args = [CallID, Session], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). --spec get_session_info(binary(), jid:jid()) -> - {ok, map()} | {error, any()}. 
-get_session_info(SID, User) -> - UserUS = jid:to_lus(User), - case mnesia:dirty_read(jingle_sip_session, SID) of - [#jingle_sip_session{sid = SID, - dialog = Handle, - request = Request, - state = State, - direction = Dir, - node = ONode, - owner = UserUS, - to = To, - from = From, - meta = Meta}] -> - {ok, #{sid => SID, - dialog => Handle, - request => Request, - state => State, - direction => Dir, - node => ONode, - from => From, - to => To, - meta => Meta}}; - _ -> - {error, not_found} - end. +-spec update_session(call_id(), update_fun()) -> ok | {error, _}. +update_session(CallID, F) -> + Args = [CallID, F], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec remove_session(call_id()) -> ok. remove_session(CallID) -> - mnesia:dirty_delete(jingle_sip_session, CallID). + Args = [CallID], + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). diff --git a/src/jingle_sip/mod_jingle_sip_cets.erl b/src/jingle_sip/mod_jingle_sip_cets.erl new file mode 100644 index 00000000000..d9c255a291a --- /dev/null +++ b/src/jingle_sip/mod_jingle_sip_cets.erl @@ -0,0 +1,58 @@ +%% @doc Backend module for mod_jingle_sip for CETS backend +-module(mod_jingle_sip_cets). +-behaviour(mod_jingle_sip_backend). + +-include("mongoose.hrl"). + +-export([init/2]). +-export([read_session/1]). +-export([write_new_session/2]). +-export([update_session/2]). +-export([remove_session/1]). + +-type call_id() :: mod_jingle_sip_session:call_id(). +-type session() :: mod_jingle_sip_session:session(). +-type update_fun() :: mod_jingle_sip_session:update_fun(). + +-define(TABLE, cets_jingle_sip_session). + +-spec init(mongooseim:host_type(), gen_mod:module_opts()) -> ok. +init(_Host, _Opts) -> + %% We store Erlang records, so keypos is 2 + cets:start(?TABLE, #{keypos => 2}), + cets_discovery:add_table(mongoose_cets_discovery, ?TABLE). + +-spec read_session(call_id()) -> [session()]. +read_session(CallID) -> + ets:lookup(?TABLE, CallID). 
+ +-spec write_new_session(call_id(), session()) -> + ok | {error, conflict}. +write_new_session(CallID, Session) -> + case read_session(CallID) of + [_] -> + {error, conflict}; + _ -> + case cets:insert_new(?TABLE, Session) of + true -> + ok; + false -> + {error, conflict} + end + end. + +-spec update_session(call_id(), update_fun()) -> ok | {error, _}. +update_session(CallID, F) -> + case read_session(CallID) of + [Session] -> + case F(Session) of + {error, _} = Err -> Err; + Session2 -> cets:insert(?TABLE, Session2) + end; + _ -> + {error, not_found} + end. + +-spec remove_session(call_id()) -> ok. +remove_session(CallID) -> + cets:delete(?TABLE, CallID). diff --git a/src/jingle_sip/mod_jingle_sip_mnesia.erl b/src/jingle_sip/mod_jingle_sip_mnesia.erl new file mode 100644 index 00000000000..bd3a96e9d14 --- /dev/null +++ b/src/jingle_sip/mod_jingle_sip_mnesia.erl @@ -0,0 +1,67 @@ +%% Mnesia backend module for mod_jingle_sip module +-module(mod_jingle_sip_mnesia). +-behaviour(mod_jingle_sip_backend). + +-include("mongoose.hrl"). +-include("mod_jingle_sip_session.hrl"). + +-export([init/2]). +-export([read_session/1]). +-export([write_new_session/2]). +-export([update_session/2]). +-export([remove_session/1]). + +-type call_id() :: mod_jingle_sip_session:call_id(). +-type session() :: mod_jingle_sip_session:session(). +-type update_fun() :: mod_jingle_sip_session:update_fun(). + +-spec init(mongooseim:host_type(), gen_mod:module_opts()) -> ok. +init(_Host, _Opts) -> + mnesia:create_table(jingle_sip_session, + [{ram_copies, [node()]}, + {attributes, record_info(fields, jingle_sip_session)}]), + ok. + +-spec read_session(call_id()) -> [session()]. +read_session(CallID) -> + mnesia:dirty_read(jingle_sip_session, CallID). + +-spec write_new_session(call_id(), session()) -> + ok | {error, conflict}. +write_new_session(CallID, Session) -> + run_transaction(fun() -> write_new_session_tr(CallID, Session) end). 
+ +write_new_session_tr(CallID, Session) -> + case mnesia:wread({jingle_sip_session, CallID}) of + [_] -> + {error, conflict}; + _ -> + mnesia:write(Session) + end. + +-spec update_session(call_id(), update_fun()) -> ok | {error, _}. +update_session(CallID, F) -> + run_transaction(fun() -> update_session_tr(CallID, F) end). + +update_session_tr(CallID, F) -> + case mnesia:wread({jingle_sip_session, CallID}) of + [Session] -> + case F(Session) of + {error, _} = Err -> Err; + Session2 -> mnesia:write(Session2) + end; + _ -> + {error, not_found} + end. + +run_transaction(TFun) -> + case mnesia:transaction(TFun) of + {atomic, Result} -> + Result; + {aborted, Reason} -> + {error, Reason} + end. + +-spec remove_session(call_id()) -> ok. +remove_session(CallID) -> + mnesia:dirty_delete(jingle_sip_session, CallID). diff --git a/src/jingle_sip/mod_jingle_sip_session.erl b/src/jingle_sip/mod_jingle_sip_session.erl new file mode 100644 index 00000000000..2647baf3788 --- /dev/null +++ b/src/jingle_sip/mod_jingle_sip_session.erl @@ -0,0 +1,195 @@ +%% @doc Handles operations with #jingle_sip_session{} record +-module(mod_jingle_sip_session). + +-export([set_incoming_request/5]). +-export([set_incoming_handle/2]). +-export([set_outgoing_request/4]). +-export([set_outgoing_handle/4]). +-export([set_outgoing_accepted/1]). +-export([set_incoming_accepted/1]). +-export([get_incoming_request/2]). +-export([get_outgoing_handle/2]). +-export([get_session_info/2]). +-export([remove_session/1]). + +-include("mongoose.hrl"). +-include("mod_jingle_sip_session.hrl"). + +-type call_id() :: binary(). +-type incoming_request() :: {node(), binary()}. +-type outgoing_handle() :: binary(). +-type dialog_handle() :: nksip:handle(). +-type request_id() :: binary(). 
+-type session() :: #jingle_sip_session{ + sid :: call_id(), + dialog :: dialog_handle() | undefined, + state :: accepted | ringing | undefined, + direction :: in | out, + request :: request_id() | undefined, + node :: node(), + owner :: jid:simple_bare_jid(), + from :: jid:simple_bare_jid(), + to :: jid:simple_bare_jid(), + now :: integer(), + meta :: #{init_stanza => exml:element()} + }. +-type update_fun() :: fun((session()) -> session() | {error, term()}). +-export_type([call_id/0, session/0, update_fun/0]). + +-spec make_simple_bare_jid(jid:jid()) -> jid:simple_bare_jid(). +make_simple_bare_jid(Jid) -> + {_, _} = jid:to_lus(Jid). + +-spec set_incoming_request(CallID :: call_id(), ReqID :: request_id(), + From :: jid:jid(), To :: jid:jid(), exml:element()) -> + ok | {error, any()}. +set_incoming_request(CallID, ReqID, From, To, JingleEl) -> + Owner = make_simple_bare_jid(To), + Meta = #{init_stanza => JingleEl}, + Session = #jingle_sip_session{sid = CallID, + request = ReqID, + dialog = undefined, + state = undefined, + direction = in, + node = node(), + from = make_simple_bare_jid(From), + to = Owner, + owner = Owner, + now = os:system_time(microsecond), + meta = Meta}, + mod_jingle_sip_backend:write_new_session(CallID, Session). + +-spec set_outgoing_request(CallID :: call_id(), ReqID :: request_id(), + From :: jid:jid(), To :: jid:jid()) -> + ok | {error, any()}. +set_outgoing_request(CallID, ReqID, From, To) -> + Owner = make_simple_bare_jid(From), + Session = #jingle_sip_session{sid = CallID, + request = ReqID, + dialog = undefined, + state = undefined, + direction = out, + node = node(), + from = Owner, + to = make_simple_bare_jid(To), + owner = Owner, + now = os:system_time(microsecond), + meta = #{}}, + mod_jingle_sip_backend:write_new_session(CallID, Session). + +-spec set_incoming_handle(CallID :: call_id(), DialogHandle :: dialog_handle()) -> + ok | {error, any()}. 
+set_incoming_handle(CallID, DialogHandle) -> + F = fun(Session) -> do_set_incoming_handle(DialogHandle, Session) end, + mod_jingle_sip_backend:update_session(CallID, F). + +do_set_incoming_handle(DialogHandle, Session = #jingle_sip_session{dialog = undefined, direction = in}) -> + Session#jingle_sip_session{dialog = DialogHandle, + node = node()}; +do_set_incoming_handle(_, _) -> + {error, incoming_handle_exists}. + +-spec set_outgoing_handle(CallID :: call_id(), DialogHandle :: dialog_handle(), + From :: jid:jid(), To :: jid:jid()) -> + ok | {error, any()}. +set_outgoing_handle(CallID, DialogHandle, From, To) -> + F = fun(Session) -> do_set_outgoing_handle(DialogHandle, Session) end, + case mod_jingle_sip_backend:update_session(CallID, F) of + {error, not_found} -> + Owner = make_simple_bare_jid(From), + Session = #jingle_sip_session{sid = CallID, + dialog = DialogHandle, + node = node(), + direction = out, + state = ringing, + from = Owner, + to = make_simple_bare_jid(To), + owner = Owner, + now = os:system_time(microsecond), + meta = #{}}, + mod_jingle_sip_backend:write_new_session(CallID, Session); + Res -> + Res + end. + +do_set_outgoing_handle(DialogHandle, Session = #jingle_sip_session{dialog = undefined, direction = out}) -> + Session#jingle_sip_session{dialog = DialogHandle, + node = node()}; +do_set_outgoing_handle(_, _) -> + {error, outgoing_handle_exists}. + +-spec set_incoming_accepted(CallID :: call_id()) -> + ok | {error, any()}. +set_incoming_accepted(CallID) -> + mod_jingle_sip_backend:update_session(CallID, fun do_set_incoming_accepted/1). + +do_set_incoming_accepted(Session = #jingle_sip_session{direction = in, meta = Meta}) -> + MetaWithoutInitStanza = maps:without([init_stanza], Meta), + Session#jingle_sip_session{state = accepted, + meta = MetaWithoutInitStanza}; +do_set_incoming_accepted(_) -> + {error, not_found}. + +-spec set_outgoing_accepted(CallID :: call_id()) -> + ok | {error, any()}. 
+set_outgoing_accepted(CallID) -> + mod_jingle_sip_backend:update_session(CallID, fun do_set_outgoing_accepted/1). + +do_set_outgoing_accepted(Session = #jingle_sip_session{direction = out}) -> + Session#jingle_sip_session{state = accepted}; +do_set_outgoing_accepted(_) -> + {error, not_found}. + +-spec get_incoming_request(call_id(), jid:jid()) -> {ok, undefined | incoming_request()} | + {error, not_found}. +get_incoming_request(CallID, User) -> + UserUS = make_simple_bare_jid(User), + case mod_jingle_sip_backend:read_session(CallID) of + [#jingle_sip_session{request = ReqID, node = Node, owner = UserUS}] -> + {ok, {Node, ReqID}}; + _ -> + {error, not_found} + end. + +-spec get_outgoing_handle(call_id(), jid:jid()) -> {ok, undefined | outgoing_handle()} | + {error, not_found}. +get_outgoing_handle(SID, User) -> + UserUS = make_simple_bare_jid(User), + case mod_jingle_sip_backend:read_session(SID) of + [#jingle_sip_session{dialog = Handle, owner = UserUS}] -> + {ok, Handle}; + _ -> + {error, not_found} + end. + +-spec get_session_info(binary(), jid:jid()) -> + {ok, map()} | {error, any()}. +get_session_info(SID, User) -> + UserUS = make_simple_bare_jid(User), + case mod_jingle_sip_backend:read_session(SID) of + [#jingle_sip_session{sid = SID, + dialog = Handle, + request = Request, + state = State, + direction = Dir, + node = ONode, + owner = UserUS, + to = To, + from = From, + meta = Meta}] -> + {ok, #{sid => SID, + dialog => Handle, + request => Request, + state => State, + direction => Dir, + node => ONode, + from => From, + to => To, + meta => Meta}}; + _ -> + {error, not_found} + end. + +-spec remove_session(call_id()) -> ok. +remove_session(CallID) -> + mod_jingle_sip_backend:remove_session(CallID). diff --git a/src/mam/mod_mam_muc.erl b/src/mam/mod_mam_muc.erl index 7182af8a271..28ec3438d0a 100644 --- a/src/mam/mod_mam_muc.erl +++ b/src/mam/mod_mam_muc.erl @@ -23,7 +23,7 @@ %%% %%%
    %%%
  • date (using `timestamp()');
  • -%%%
  • node number (using {@link ejabberd_node_id}).
  • +%%%
  • node number (using {@link mongoose_node_num}).
  • %%%
%%% @end %%%------------------------------------------------------------------- diff --git a/src/mam/mod_mam_pm.erl b/src/mam/mod_mam_pm.erl index 2df3dd9745f..54b9189206d 100644 --- a/src/mam/mod_mam_pm.erl +++ b/src/mam/mod_mam_pm.erl @@ -23,7 +23,7 @@ %%% %%%
    %%%
  • date (using `timestamp()');
  • -%%%
  • node number (using {@link ejabberd_node_id}).
  • +%%%
  • node number (using {@link mongoose_node_num}).
  • %%%
%%% @end %%%------------------------------------------------------------------- diff --git a/src/mam/mod_mam_utils.erl b/src/mam/mod_mam_utils.erl index 1d7b127905c..d32f101d0f8 100644 --- a/src/mam/mod_mam_utils.erl +++ b/src/mam/mod_mam_utils.erl @@ -176,9 +176,9 @@ get_or_generate_mam_id(Acc) -> -spec generate_message_id(integer()) -> integer(). generate_message_id(CandidateStamp) -> - {ok, NodeId} = ejabberd_node_id:node_id(), + NodeNum = mongoose_node_num:node_num(), UniqueStamp = mongoose_mam_id:next_unique(CandidateStamp), - encode_compact_uuid(UniqueStamp, NodeId). + encode_compact_uuid(UniqueStamp, NodeNum). %% @doc Create a message ID (UID). %% @@ -186,17 +186,17 @@ generate_message_id(CandidateStamp) -> %% It puts node id as a last byte. %% The maximum date, that can be encoded is `{{4253, 5, 31}, {22, 20, 37}}'. -spec encode_compact_uuid(integer(), integer()) -> integer(). -encode_compact_uuid(Microseconds, NodeId) - when is_integer(Microseconds), is_integer(NodeId) -> - (Microseconds bsl 8) + NodeId. +encode_compact_uuid(Microseconds, NodeNum) + when is_integer(Microseconds), is_integer(NodeNum) -> + (Microseconds bsl 8) + NodeNum. %% @doc Extract date and node id from a message id. -spec decode_compact_uuid(integer()) -> {integer(), byte()}. decode_compact_uuid(Id) -> Microseconds = Id bsr 8, - NodeId = Id band 255, - {Microseconds, NodeId}. + NodeNum = Id band 255, + {Microseconds, NodeNum}. %% @doc Encode a message ID to pass it to the user. 
diff --git a/src/mod_bosh.erl b/src/mod_bosh.erl index 4df7da858d4..04f89cab39a 100644 --- a/src/mod_bosh.erl +++ b/src/mod_bosh.erl @@ -341,10 +341,10 @@ maybe_start_session_on_known_host(HostType, Req, Body, Opts) -> try maybe_start_session_on_known_host_unsafe(HostType, Req, Body, Opts) catch - error:Reason -> + error:Reason:Stacktrace -> %% It's here because something catch-y was here before ?LOG_ERROR(#{what => bosh_stop, issue => undefined_condition, - reason => Reason}), + reason => Reason, stacktrace => Stacktrace}), Req1 = terminal_condition(<<"undefined-condition">>, [], Req), {false, Req1} end. @@ -373,9 +373,13 @@ start_session(HostType, Peer, PeerCert, Body, Opts) -> store_session(Sid, Socket) -> mod_bosh_backend:create_session(#bosh_session{sid = Sid, socket = Socket}). +%% MUST be unique and unpredictable +%% https://xmpp.org/extensions/xep-0124.html#security-sidrid +%% Also, CETS requires using the node as a part of the key +%% (but if the key is always random CETS is happy with that too) -spec make_sid() -> binary(). make_sid() -> - mongoose_bin:encode_crypto(term_to_binary(make_ref())). + base16:encode(crypto:strong_rand_bytes(20)). %%-------------------------------------------------------------------- %% HTTP errors diff --git a/src/mod_bosh_cets.erl b/src/mod_bosh_cets.erl new file mode 100644 index 00000000000..18b338c656e --- /dev/null +++ b/src/mod_bosh_cets.erl @@ -0,0 +1,48 @@ +-module(mod_bosh_cets). + +-behaviour(mod_bosh_backend). + +%% mod_bosh_backend callbacks +-export([start/0, + create_session/1, + delete_session/1, + get_session/1, + get_sessions/0, + node_cleanup/1]). + +-include("mod_bosh.hrl"). + +-define(TABLE, cets_bosh). + +-spec start() -> any(). +start() -> + cets:start(?TABLE, #{keypos => 2}), + cets_discovery:add_table(mongoose_cets_discovery, ?TABLE). + +%% Session key (sid) is unique, so we don't expect conflicts +%% So, the conflict resolution can be avoided +-spec create_session(mod_bosh:session()) -> any(). 
+create_session(Session) -> + cets:insert(?TABLE, Session). + +-spec delete_session(mod_bosh:sid()) -> any(). +delete_session(Sid) -> + cets:delete(?TABLE, Sid). + +-spec get_session(mod_bosh:sid()) -> [mod_bosh:session()]. +get_session(Sid) -> + ets:lookup(?TABLE, Sid). + +-spec get_sessions() -> [mod_bosh:session()]. +get_sessions() -> + ets:tab2list(?TABLE). + +-spec node_cleanup(atom()) -> any(). +node_cleanup(Node) -> + Guard = {'==', {node, '$1'}, Node}, + R = {'_', '_', '$1'}, + cets:sync(?TABLE), + %% We don't need to replicate deletes + %% We remove the local content here + ets:select_delete(?TABLE, [{R, [Guard], [true]}]), + ok. diff --git a/src/mod_disco.erl b/src/mod_disco.erl index 68a7a98a59a..9a05a199e92 100644 --- a/src/mod_disco.erl +++ b/src/mod_disco.erl @@ -58,7 +58,7 @@ -include("jlib.hrl"). -include("mongoose_config_spec.hrl"). --type return_hidden() :: ejabberd_router:return_hidden(). +-type return_hidden() :: mongoose_component:return_hidden(). -type server_info() :: #{name := binary(), urls := [binary()], modules => module()}. -spec start(mongooseim:host_type(), gen_mod:module_opts()) -> ok. @@ -291,7 +291,7 @@ get_external_components(Domain, ReturnHidden) -> lists:filter( fun(Component) -> check_if_host_is_the_shortest_suffix_for_route(Component, Domain, StaticDomains) - end, ejabberd_router:dirty_get_all_components(ReturnHidden)). + end, mongoose_component:dirty_get_all_components(ReturnHidden)). -spec check_if_host_is_the_shortest_suffix_for_route( Route :: jid:lserver(), Host :: jid:lserver(), VHosts :: [jid:lserver()]) -> boolean(). diff --git a/src/mod_muc.erl b/src/mod_muc.erl index 975ce2d2688..cd56976d1c8 100644 --- a/src/mod_muc.erl +++ b/src/mod_muc.erl @@ -66,7 +66,8 @@ remove_domain/3, acc_room_affiliations/3, can_access_identity/3, - disco_local_items/3]). + disco_local_items/3, + node_cleanup_for_host_type/3]). %% Stats -export([online_rooms_number/0]). @@ -118,11 +119,7 @@ host_type :: host_type(), pid :: pid() }. 
- --type muc_registered() :: #muc_registered{ - us_host :: jid:literal_jid(), - nick :: nick() - }. +-export_type([muc_online_room/0]). -type room_event_data() :: #{ from_nick := nick(), @@ -146,7 +143,7 @@ -type state() :: #muc_state{}. --export_type([muc_room/0, muc_registered/0]). +-export_type([muc_room/0]). -define(PROCNAME, ejabberd_mod_muc). @@ -166,6 +163,7 @@ start_link(HostType, Opts) -> -spec start(host_type(), _) -> ok. start(HostType, Opts) when is_map(Opts) -> + mongoose_muc_online_backend:start(HostType, Opts), ensure_metrics(HostType), start_supervisor(HostType), start_server(HostType, Opts), @@ -201,6 +199,8 @@ config_spec() -> #section{ items = #{<<"backend">> => #option{type = atom, validate = {module, mod_muc}}, + <<"online_backend">> => #option{type = atom, + validate = {module, mongoose_muc_online}}, <<"host">> => #option{type = string, validate = subdomain_template, process = fun mongoose_subdomain_utils:make_subdomain_pattern/1}, @@ -252,6 +252,7 @@ config_spec() -> defaults() -> #{<<"backend">> => mnesia, + <<"online_backend">> => mnesia, <<"host">> => default_host(), <<"access">> => all, <<"access_create">> => all, @@ -368,11 +369,7 @@ stop_gen_server(HostType) -> %% So the message sending must be catched -spec room_destroyed(host_type(), jid:server(), room(), pid()) -> 'ok'. room_destroyed(HostType, MucHost, Room, Pid) -> - Obj = #muc_online_room{name_host = {Room, MucHost}, - host_type = HostType, pid = Pid}, - F = fun() -> mnesia:delete_object(Obj) end, - {atomic, ok} = mnesia:transaction(F), - ok. + mongoose_muc_online_backend:room_destroyed(HostType, MucHost, Room, Pid). %% @doc Create a room. %% If Opts = default, the default room options are used. @@ -449,13 +446,7 @@ get_nick(HostType, MucHost, From) -> -spec init({host_type(), map()}) -> {ok, state()}. 
init({HostType, Opts}) -> mod_muc_backend:init(HostType, Opts), - mnesia:create_table(muc_online_room, - [{ram_copies, [node()]}, - {attributes, record_info(fields, muc_online_room)}]), - mnesia:add_table_copy(muc_online_room, node(), ram_copies), catch ets:new(muc_online_users, [bag, named_table, public, {keypos, 2}]), - clean_table_from_bad_node(node(), HostType), - mnesia:subscribe(system), #{access := Access, access_create := AccessCreate, access_admin := AccessAdmin, @@ -545,9 +536,6 @@ handle_call({create_instant, ServerHost, MucHost, Room, From, Nick, Opts}, handle_cast(_Msg, State) -> {noreply, State}. -handle_info({mnesia_system_event, {mnesia_down, Node}}, State) -> - clean_table_from_bad_node(Node), - {noreply, State}; handle_info(stop_hibernated_persistent_rooms, #muc_state{host_type = HostType, hibernated_room_timeout = Timeout} = State) @@ -655,16 +643,16 @@ route_to_room(_MucHost, <<>>, {_, To, _Acc, _} = Routed, State) -> {_, _, Nick} = jid:to_lower(To), route_by_nick(Nick, Routed, State); route_to_room(MucHost, Room, Routed, #muc_state{} = State) -> - case mnesia:dirty_read(muc_online_room, {Room, MucHost}) of - [] -> + HostType = State#muc_state.host_type, + case find_room_pid(HostType, MucHost, Room) of + {error, not_found} -> case get_registered_room_or_route_error(MucHost, Room, Routed, State) of {ok, Pid} -> route_to_online_room(Pid, Routed); {route_error, _ErrText} -> ok end; - [R] -> - Pid = R#muc_online_room.pid, + {ok, Pid} -> route_to_online_room(Pid, Routed) end. 
@@ -696,7 +684,7 @@ get_registered_room_or_route_error_from_presence(MucHost, Room, From, To, Acc, default_room_opts = DefRoomOpts} = State, {_, _, Nick} = jid:to_lower(To), ServerHost = make_server_host(To, State), - Result = start_new_room(HostType, ServerHost, MucHost, Access, Room, + Result = start_room(HostType, ServerHost, MucHost, Access, Room, HistorySize, RoomShaper, HttpAuthPool, From, Nick, DefRoomOpts, Acc), case Result of @@ -714,11 +702,7 @@ get_registered_room_or_route_error_from_presence(MucHost, Room, From, To, Acc, {Acc1, Err} = jlib:make_error_reply( Acc, Packet, mongoose_xmpp_errors:service_unavailable(Lang, ErrText)), ejabberd_router:route(To, From, Acc1, Err), - {route_error, ErrText}; - _ -> - %% Unknown error, most likely a room process failed to start. - %% Do not notify user (we can send "internal server error"). - erlang:error({start_new_room_failed, Room, Result}) + {route_error, ErrText} end; {error, Reason} -> Lang = exml_query:attr(Packet, <<"xml:lang">>, <<>>), @@ -885,15 +869,13 @@ check_user_can_create_room(HostType, ServerHost, AccessCreate, From, RoomID) -> {error, no_matching_acl_rule} end. --spec start_new_room(HostType :: host_type(), ServerHost :: jid:lserver(), +-spec start_room(HostType :: host_type(), ServerHost :: jid:lserver(), MucHost :: muc_host(), Access :: access(), room(), - HistorySize :: 'undefined' | integer(), RoomShaper :: shaper:shaper(), + HistorySize :: undefined | integer(), RoomShaper :: shaper:shaper(), HttpAuthPool :: none | mongoose_http_client:pool(), From :: jid:jid(), nick(), - DefRoomOpts :: 'undefined' | [any()], Acc :: mongoose_acc:t()) - -> {'error', _} - | {'ok', 'undefined' | pid()} - | {'ok', 'undefined' | pid(), _}. -start_new_room(HostType, ServerHost, MucHost, Access, Room, + DefRoomOpts :: undefined | [any()], Acc :: mongoose_acc:t()) + -> {error, {failed_to_restore, Reason :: term()}} | {ok, pid()}. 
+start_room(HostType, ServerHost, MucHost, Access, Room, HistorySize, RoomShaper, HttpAuthPool, From, Nick, DefRoomOpts, Acc) -> case mod_muc_backend:restore_room(HostType, MucHost, Room) of @@ -918,38 +900,32 @@ start_new_room(HostType, ServerHost, MucHost, Access, Room, register_room_or_stop_if_duplicate(HostType, MucHost, Room, Pid) -> case register_room(HostType, MucHost, Room, Pid) of - {_, ok} -> + ok -> {ok, Pid}; - {_, {exists, OldPid}} -> + {exists, OldPid} -> mod_muc_room:stop(Pid), - {ok, OldPid} + {ok, OldPid}; + {error, Reason} -> + error({failed_to_register, MucHost, Room, Pid, Reason}) end. -spec register_room(HostType :: host_type(), jid:server(), room(), - pid()) -> {'aborted', _} | {'atomic', _}. + pid()) -> ok | {exists, pid()} | {error, term()}. register_room(HostType, MucHost, Room, Pid) -> - F = fun() -> - case mnesia:read(muc_online_room, {Room, MucHost}, write) of - [] -> - mnesia:write(#muc_online_room{name_host = {Room, MucHost}, - host_type = HostType, - pid = Pid}); - [R] -> - {exists, R#muc_online_room.pid} - end - end, - mnesia:transaction(F). - + mongoose_muc_online_backend:register_room(HostType, MucHost, Room, Pid). -spec room_jid_to_pid(RoomJID :: jid:jid()) -> {ok, pid()} | {error, not_found}. -room_jid_to_pid(#jid{luser=RoomName, lserver=MucService}) -> - case mnesia:dirty_read(muc_online_room, {RoomName, MucService}) of - [R] -> - {ok, R#muc_online_room.pid}; - [] -> +room_jid_to_pid(#jid{luser = Room, lserver = MucHost}) -> + case mongoose_domain_api:get_subdomain_host_type(MucHost) of + {ok, HostType} -> + find_room_pid(HostType, MucHost, Room); + _ -> {error, not_found} end. +find_room_pid(HostType, MucHost, Room) -> + mongoose_muc_online_backend:find_room_pid(HostType, MucHost, Room). + -spec default_host() -> mongoose_subdomain_utils:subdomain_pattern(). default_host() -> mongoose_subdomain_utils:make_subdomain_pattern(<<"conference.@HOST@">>). 
@@ -1183,10 +1159,8 @@ broadcast_service_message(MucHost, Msg) -> -spec get_vh_rooms(muc_host()) -> [muc_online_room()]. get_vh_rooms(MucHost) -> - mnesia:dirty_select(muc_online_room, - [{#muc_online_room{name_host = '$1', _ = '_'}, - [{'==', {element, 2, '$1'}, MucHost}], - ['$_']}]). + {ok, HostType} = mongoose_domain_api:get_subdomain_host_type(MucHost), + mongoose_muc_online_backend:get_online_rooms(HostType, MucHost). -spec get_persistent_vh_rooms(muc_host()) -> [muc_room()]. get_persistent_vh_rooms(MucHost) -> @@ -1198,36 +1172,9 @@ get_persistent_vh_rooms(MucHost) -> [] end. --spec clean_table_from_bad_node(node()) -> any(). -clean_table_from_bad_node(Node) -> - F = fun() -> - Es = mnesia:select( - muc_online_room, - [{#muc_online_room{pid = '$1', _ = '_'}, - [{'==', {node, '$1'}, Node}], - ['$_']}]), - lists:foreach(fun(E) -> - mnesia:delete_object(E) - end, Es) - end, - mnesia:async_dirty(F). - - --spec clean_table_from_bad_node(node(), host_type()) -> any(). -clean_table_from_bad_node(Node, HostType) -> - F = fun() -> - Es = mnesia:select( - muc_online_room, - [{#muc_online_room{pid = '$1', - host_type = HostType, - _ = '_'}, - [{'==', {node, '$1'}, Node}], - ['$_']}]), - lists:foreach(fun(E) -> - mnesia:delete_object(E) - end, Es) - end, - mnesia:async_dirty(F). +-spec node_cleanup(host_type(), node()) -> ok. +node_cleanup(HostType, Node) -> + mongoose_muc_online_backend:node_cleanup(HostType, Node). %%==================================================================== %% Hooks handlers @@ -1309,6 +1256,14 @@ disco_local_items(Acc = #{host_type := HostType, disco_local_items(Acc, _, _) -> {ok, Acc}. +-spec node_cleanup_for_host_type(Acc, Params, Extra) -> {ok, Acc} when + Acc :: mongoose_disco:item_acc(), + Params :: map(), + Extra :: gen_hook:extra(). +node_cleanup_for_host_type(Acc, #{node := Node}, #{host_type := HostType}) -> + node_cleanup(HostType, Node), + Acc. 
+ online_rooms_number() -> lists:sum([online_rooms_number(HostType) || HostType <- gen_mod:hosts_with_module(?MODULE)]). @@ -1372,7 +1327,8 @@ hooks(HostType) -> {remove_domain, HostType, fun ?MODULE:remove_domain/3, #{}, 50}, {acc_room_affiliations, HostType, fun ?MODULE:acc_room_affiliations/3, #{}, 50}, {can_access_identity, HostType, fun ?MODULE:can_access_identity/3, #{}, 50}, - {disco_local_items, HostType, fun ?MODULE:disco_local_items/3, #{}, 250}]. + {disco_local_items, HostType, fun ?MODULE:disco_local_items/3, #{}, 250}, + {node_cleanup_for_host_type, HostType, fun ?MODULE:node_cleanup_for_host_type/3, #{}, 50}]. subdomain_pattern(HostType) -> gen_mod:get_module_opt(HostType, ?MODULE, host). diff --git a/src/mod_muc_mnesia.erl b/src/mod_muc_mnesia.erl index 4e61bafe168..5fb4c134389 100644 --- a/src/mod_muc_mnesia.erl +++ b/src/mod_muc_mnesia.erl @@ -41,6 +41,11 @@ -include("jlib.hrl"). -include("mod_muc.hrl"). +-record(muc_registered, { + us_host :: {US :: jid:simple_bare_jid(), MucHost :: jid:lserver()} | '$1', + nick :: mod_muc:nick() + }). + init(_HostType, _Opts) -> mnesia:create_table(muc_room, [{disc_copies, [node()]}, diff --git a/src/mod_muc_room.erl b/src/mod_muc_room.erl index ff74e21ae36..8d896f71424 100644 --- a/src/mod_muc_room.erl +++ b/src/mod_muc_room.erl @@ -154,13 +154,11 @@ %%% API %%%---------------------------------------------------------------------- --spec start_new(HostType :: mongooseim:host_type(), Host :: jid:server(), ServerHost :: jid:server(), +-spec start_new(HostType :: mongooseim:host_type(), Host :: jid:lserver(), ServerHost :: jid:lserver(), Access :: _, Room :: mod_muc:room(), HistorySize :: integer(), RoomShaper :: shaper:shaper(), HttpAuthPool :: none | mongoose_http_client:pool(), Creator :: jid:jid(), Nick :: mod_muc:nick(), - DefRoomOpts :: list()) -> {'error', _} - | {'ok', 'undefined' | pid()} - | {'ok', 'undefined' | pid(), _}. + DefRoomOpts :: list()) -> {ok, pid()}. 
start_new(HostType, Host, ServerHost, Access, Room, HistorySize, RoomShaper, HttpAuthPool, Creator, Nick, DefRoomOpts) -> Supervisor = gen_mod:get_module_proc(HostType, ejabberd_mod_muc_sup), @@ -171,12 +169,10 @@ start_new(HostType, Host, ServerHost, Access, Room, creator => Creator, nick => Nick, def_opts => DefRoomOpts}, supervisor:start_child(Supervisor, [Args]). --spec start_restored(HostType :: mongooseim:host_type(), Host :: jid:server(), ServerHost :: jid:server(), +-spec start_restored(HostType :: mongooseim:host_type(), Host :: jid:lserver(), ServerHost :: jid:lserver(), Access :: _, Room :: mod_muc:room(), HistorySize :: integer(), RoomShaper :: shaper:shaper(), HttpAuthPool :: none | mongoose_http_client:pool(), - Opts :: list()) -> {'error', _} - | {'ok', 'undefined' | pid()} - | {'ok', 'undefined' | pid(), _}. + Opts :: list()) -> {ok, pid()}. start_restored(HostType, Host, ServerHost, Access, Room, HistorySize, RoomShaper, HttpAuthPool, Opts) when is_list(Opts) -> diff --git a/src/mongoose_cets_discovery.erl b/src/mongoose_cets_discovery.erl new file mode 100644 index 00000000000..e843846275c --- /dev/null +++ b/src/mongoose_cets_discovery.erl @@ -0,0 +1,35 @@ +-module(mongoose_cets_discovery). +-export([supervisor_specs/0]). + +-include("mongoose_logger.hrl"). + +supervisor_specs() -> + supervisor_specs(mongoose_config:get_opt([internal_databases, cets], disabled)). 
+ +supervisor_specs(disabled) -> + []; +supervisor_specs(#{backend := DiscoBackend, cluster_name := ClusterName} = Opts) -> + DiscoFile = + case {DiscoBackend, Opts} of + {file, #{node_list_file := NodeFile}} -> + NodeFile; + {file, _} -> + ?LOG_CRITICAL(#{what => node_list_file_option_is_required, + text => <<"Specify internal_databases.cets.node_list_file option">>}), + error(node_list_file_option_is_required); + _ -> + undefined + end, + DiscoOpts = #{ + backend_module => disco_backend_to_module(DiscoBackend), + cluster_name => atom_to_binary(ClusterName), + node_name_to_insert => atom_to_binary(node(), latin1), + name => mongoose_cets_discovery, disco_file => DiscoFile}, + CetsDisco = + {cets_discovery, + {cets_discovery, start_link, [DiscoOpts]}, + permanent, infinity, supervisor, [cets_discovery]}, + [CetsDisco]. + +disco_backend_to_module(rdbms) -> mongoose_cets_discovery_rdbms; +disco_backend_to_module(file) -> cets_discovery_file. diff --git a/src/mongoose_cets_discovery_rdbms.erl b/src/mongoose_cets_discovery_rdbms.erl new file mode 100644 index 00000000000..87b81371e77 --- /dev/null +++ b/src/mongoose_cets_discovery_rdbms.erl @@ -0,0 +1,105 @@ +%% @doc MongooseIM RDBMS backend for cets_discovery. +-module(mongoose_cets_discovery_rdbms). +-behaviour(cets_discovery). +-export([init/1, get_nodes/1]). + +-include_lib("kernel/include/logger.hrl"). + +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). +-endif. + +-type opts() :: #{cluster_name => binary(), node_name_to_insert => binary(), last_query_info => map()}. +-type state() :: opts(). + +-spec init(opts()) -> state(). +init(Opts = #{cluster_name := _, node_name_to_insert := _}) -> + Opts#{last_query_info => #{}}. + +-spec get_nodes(state()) -> {cets_discovery:get_nodes_result(), state()}. 
+get_nodes(State = #{cluster_name := ClusterName, node_name_to_insert := Node}) -> + try + try_register(ClusterName, Node) + of + {Num, Nodes, Info} -> + mongoose_node_num:set_node_num(Num), + {{ok, Nodes}, State#{last_query_info => Info}} + catch Class:Reason:Stacktrace -> + ?LOG_ERROR(#{what => discovery_failed_select, class => Class, + reason => Reason, stacktrace => Stacktrace}), + {{error, Reason}, State} + end. + +try_register(ClusterName, NodeBin) when is_binary(NodeBin), is_binary(ClusterName) -> + Node = binary_to_atom(NodeBin), + prepare(), + {selected, Rows} = select(ClusterName), + Pairs = [{binary_to_atom(DbNodeBin), Num} || {DbNodeBin, Num} <- Rows], + {Nodes, Nums} = lists:unzip(Pairs), + AlreadyRegistered = lists:member(Node, Nodes), + Timestamp = timestamp(), + NodeNum = + case AlreadyRegistered of + true -> + update_existing(ClusterName, NodeBin, Timestamp), + {value, {_, Num}} = lists:keysearch(Node, 1, Pairs), + Num; + false -> + Num = next_free_num(lists:usort(Nums)), + %% Could fail with duplicate node_num reason. + %% In this case just wait for the next get_nodes call. + insert_new(ClusterName, NodeBin, Timestamp, Num), + Num + end, + %% This could be used for debugging + Info = #{already_registered => AlreadyRegistered, timestamp => Timestamp, + node_num => Num, last_rows => Rows}, + {NodeNum, Nodes, Info}. + +prepare() -> + T = discovery_nodes, + mongoose_rdbms:prepare(cets_disco_select, T, [cluster_name], select()), + mongoose_rdbms:prepare(cets_disco_insert_new, T, + [cluster_name, node_name, node_num, updated_timestamp], insert_new()), + mongoose_rdbms:prepare(cets_disco_update_existing, T, + [updated_timestamp, cluster_name, node_name], update_existing()). + +select() -> + <<"SELECT node_name, node_num FROM discovery_nodes WHERE cluster_name = ?">>. + +select(ClusterName) -> + mongoose_rdbms:execute_successfully(global, cets_disco_select, [ClusterName]). 
+ +insert_new() -> + <<"INSERT INTO discovery_nodes (cluster_name, node_name, node_num, updated_timestamp)" + " VALUES (?, ?, ?, ?)">>. + +insert_new(ClusterName, Node, Timestamp, Num) -> + mongoose_rdbms:execute(global, cets_disco_insert_new, [ClusterName, Node, Num, Timestamp]). + +update_existing() -> + <<"UPDATE discovery_nodes SET updated_timestamp = ? WHERE cluster_name = ? AND node_name = ?">>. + +update_existing(ClusterName, Node, Timestamp) -> + mongoose_rdbms:execute(global, cets_disco_update_existing, [Timestamp, ClusterName, Node]). + +timestamp() -> + os:system_time(microsecond). + +%% Returns a next free node id based on the currently registered ids +next_free_num([]) -> + 0; +next_free_num([H | T = [E | _]]) when ((H + 1) =:= E) -> + %% Sequential, ignore H + next_free_num(T); +next_free_num([H | _]) -> + H + 1. + +-ifdef(TEST). + +next_free_num_test_() -> + [?_assertEqual(0, next_free_num([])), + ?_assertEqual(3, next_free_num([1, 2, 5])), + ?_assertEqual(3, next_free_num([1, 2]))]. + +-endif. diff --git a/src/mongoose_cleaner.erl b/src/mongoose_cleaner.erl index ec8f5b1823c..388937beaaa 100644 --- a/src/mongoose_cleaner.erl +++ b/src/mongoose_cleaner.erl @@ -87,7 +87,11 @@ cleanup_modules(Node) -> end. run_node_cleanup(Node) -> - {Elapsed, RetVal} = timer:tc(mongoose_hooks, node_cleanup, [Node]), + {Elapsed, RetVal} = timer:tc(fun() -> + mongoose_hooks:node_cleanup(Node), + [mongoose_hooks:node_cleanup_for_host_type(HostType, Node) || HostType <- ?ALL_HOST_TYPES], + ok + end), ?LOG_NOTICE(#{what => cleaner_done, text => <<"Finished cleaning after dead node">>, duration => erlang:round(Elapsed / 1000), diff --git a/src/mongoose_collector.erl b/src/mongoose_collector.erl index 81869273a2e..ebc356153ff 100644 --- a/src/mongoose_collector.erl +++ b/src/mongoose_collector.erl @@ -4,6 +4,7 @@ %% gen_server callbacks -behaviour(gen_server). +-export([start_common/3, stop_common/2]). 
-export([start_link/2, init/1, handle_call/3, handle_cast/2, handle_info/2]). -ignore_xref([start_link/2]). @@ -16,6 +17,17 @@ timer_ref :: undefined | reference() }). +-spec start_common(atom(), mongooseim:host_type(), map()) -> term(). +start_common(Module, HostType, WOpts) -> + Name = gen_mod:get_module_proc(HostType, Module), + MFA = {mongoose_collector, start_link, [Name, WOpts]}, + ChildSpec = {Name, MFA, permanent, 5000, worker, [Module, mongoose_collector]}, + ejabberd_sup:start_child(ChildSpec). + +stop_common(Module, HostType) -> + Name = gen_mod:get_module_proc(HostType, Module), + ejabberd_sup:stop_child(Name). + start_link(Name, Opts) -> gen_server:start_link({local, Name}, ?MODULE, Opts, []). diff --git a/src/mongoose_listener_sup.erl b/src/mongoose_listener_sup.erl index 9191cdea91b..b0b6a9f89a6 100644 --- a/src/mongoose_listener_sup.erl +++ b/src/mongoose_listener_sup.erl @@ -8,6 +8,8 @@ -ignore_xref([start_link/0, init/1]). +-include("mongoose_logger.hrl"). + %% API -spec start_link() -> {ok, pid()}. @@ -16,7 +18,8 @@ start_link() -> -spec start_child(supervisor:child_spec()) -> ok. start_child(ChildSpec) -> - {ok, _Pid} = supervisor:start_child(?MODULE, ChildSpec), + %% Use ejabberd_sup function for extra logging on errors + ejabberd_sup:start_child(?MODULE, ChildSpec), ok. %% Supervisor callbacks diff --git a/src/mongoose_node_num.erl b/src/mongoose_node_num.erl new file mode 100644 index 00000000000..ab2f67ccdbd --- /dev/null +++ b/src/mongoose_node_num.erl @@ -0,0 +1,23 @@ +%% Returns a numeric id from 0 to 255 for the current node. +%% Used to generate MAM IDs. +-module(mongoose_node_num). +-export([node_num/0, set_node_num/1]). + +-type node_num() :: 0..255. +-export_type([node_num/0]). + +%% @doc Return an integer node ID. +-spec node_num() -> node_num(). +node_num() -> + %% We just return 0 if set_node_num/1 is not called. + persistent_term:get(?MODULE, 0). + +-spec set_node_num(node_num()) -> ignore | updated | same. 
+set_node_num(Num) -> + case node_num() =:= Num of + true -> + same; + false -> + persistent_term:put(?MODULE, Num), + updated + end. diff --git a/src/mongoose_node_num_mnesia.erl b/src/mongoose_node_num_mnesia.erl new file mode 100644 index 00000000000..4a3f209f53a --- /dev/null +++ b/src/mongoose_node_num_mnesia.erl @@ -0,0 +1,36 @@ +-module(mongoose_node_num_mnesia). + +-export([init/0]). + +-record(node_num, {name :: atom(), + num :: mongoose_node_num:node_num() }). + +init() -> + mnesia:create_table(node_num, + [{ram_copies, [node()]}, {type, set}, + {attributes, record_info(fields, node_num)}]), + mnesia:add_table_index(node_num, num), + mnesia:add_table_copy(node_num, node(), ram_copies), + register_node(node()), + [#node_num{num = Num}] = mnesia:dirty_read(node_num, node()), + mongoose_node_num:set_node_num(Num), + ok. + +-spec register_node(atom()) -> ok. +register_node(NodeName) -> + {atomic, _} = mnesia:transaction(fun() -> + case mnesia:read(node_num, NodeName) of + [] -> + mnesia:write(#node_num{name = NodeName, num = next_node_num()}); + [_] -> ok + end + end), + ok. + +-spec next_node_num() -> mongoose_node_num:node_num(). +next_node_num() -> + max_node_num() + 1. + +-spec max_node_num() -> mongoose_node_num:node_num(). +max_node_num() -> + mnesia:foldl(fun(#node_num{num = Num}, Max) -> max(Num, Max) end, 0, node_num). 
diff --git a/src/mongoose_router_external.erl b/src/mongoose_router_external.erl index edefe05a0ed..a55a083e705 100644 --- a/src/mongoose_router_external.erl +++ b/src/mongoose_router_external.erl @@ -20,7 +20,7 @@ filter(OrigFrom, OrigTo, OrigAcc, OrigPacket) -> route(From, To, Acc0, Packet) -> LDstDomain = To#jid.lserver, - case ejabberd_router:lookup_component(LDstDomain) of + case mongoose_component:lookup_component(LDstDomain) of [] -> {From, To, Acc0, Packet}; [#external_component{handler = Handler}|_] -> %% may be multiple on various nodes diff --git a/src/mongoose_router_external_localnode.erl b/src/mongoose_router_external_localnode.erl index 96a64dc0dc8..e09441b39cd 100644 --- a/src/mongoose_router_external_localnode.erl +++ b/src/mongoose_router_external_localnode.erl @@ -21,7 +21,7 @@ filter(OrigFrom, OrigTo, OrigAcc, OrigPacket) -> route(From, To, Acc0, Packet) -> LDstDomain = To#jid.lserver, - case ejabberd_router:lookup_component(LDstDomain, node()) of + case mongoose_component:lookup_component(LDstDomain, node()) of [] -> {From, To, Acc0, Packet}; [#external_component{handler = Handler}] -> diff --git a/src/mongoose_start_node_id.erl b/src/mongoose_start_node_id.erl new file mode 100644 index 00000000000..85be91312a3 --- /dev/null +++ b/src/mongoose_start_node_id.erl @@ -0,0 +1,109 @@ +%% Generates a unique ID on the node start. +%% Registers the ID on all other nodes. +%% Used in ejabberd_local to find to which node to route IQ responses. +-module(mongoose_start_node_id). +-behaviour(gen_server). + +%% API +-export([start_link/0]). +-export([node_id/0, node_id_to_name/1]). +-export([register_on_remote_node_rpc/3]). + +%% gen_server callbacks +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3]). + +-ignore_xref([start_link/0, register_on_remote_node_rpc/3]). + +-include("mongoose.hrl"). +-include("mongoose_logger.hrl"). + +-type id() :: binary(). 
+ +-record(state, {start_id :: id(), mon_ref_to_start_id :: map()}). +-define(KEY, ?MODULE). + +-spec node_id() -> id(). +node_id() -> + persistent_term:get(?KEY). + +-spec node_id_to_name(id()) -> {ok, node()} | {error, unknown_id}. +node_id_to_name(ID) -> + case persistent_term:get({?KEY, ID}, undefined) of + undefined -> + {error, unknown_id}; + Name -> + {ok, Name} + end. + +-spec start_link() -> {ok, pid()}. +start_link() -> + gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + +init(_) -> + net_kernel:monitor_nodes(true), + StartId = mongoose_bin:gen_from_crypto(), + persistent_term:put(mongoose_start_node_id, StartId), + [register_on_remote_node(RemoteNode, StartId) + || RemoteNode <- [node()|nodes()]], + {ok, #state{start_id = StartId, mon_ref_to_start_id = #{}}}. + +handle_call(_Request, _From, State) -> + {reply, ok, State}. + +handle_cast({register_cleaning_task, StartId, RemotePid}, + State = #state{mon_ref_to_start_id = Mon2StartId}) -> + MonRef = erlang:monitor(process, RemotePid), + Mon2StartId2 = maps:put(MonRef, StartId, Mon2StartId), + {noreply, State#state{mon_ref_to_start_id = Mon2StartId2}}; +handle_cast(_Msg, State) -> + {noreply, State}. 
+
+handle_info({nodeup, RemoteNode}, State = #state{start_id = StartId}) ->
+    register_on_remote_node(RemoteNode, StartId),
+    {noreply, State};
+handle_info({'DOWN', MonRef, process, RemotePid, Reason},
+            State = #state{mon_ref_to_start_id = Mon2StartId}) ->
+    case maps:get(MonRef, Mon2StartId, undefined) of
+        undefined ->
+            ?LOG_ERROR(#{what => node_id_unexpected_monitor,
+                         reason => Reason,
+                         monitor_ref => MonRef,
+                         remote_pid => RemotePid,
+                         remote_node => node(RemotePid)});
+        StartId ->
+            persistent_term:erase({?KEY, StartId}),
+            ?LOG_WARNING(#{what => node_id_node_down,
+                           reason => Reason,
+                           monitor_ref => MonRef,
+                           remote_pid => RemotePid,
+                           remote_node => node(RemotePid)})
+    end,
+    %% We use pid monitors instead of node monitors to avoid cleaning
+    %% start id when a node is restarting and reappearing very quickly.
+    %% I.e. node name could be reused by a newly started node, while Refs - not.
+    %% Pids could also be reused, but collisions are rare.
+    {noreply, State#state{mon_ref_to_start_id = maps:remove(MonRef, Mon2StartId)}};
+handle_info(_Info, State) ->
+    {noreply, State}.
+
+terminate(_Reason, _State) ->
+    ok.
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+register_on_remote_node(RemoteNode, StartId) ->
+    Res = rpc:call(RemoteNode, ?MODULE, register_on_remote_node_rpc,
+                   [node(), StartId, self()]),
+    case Res of
+        ok ->
+            ok;
+        _ ->
+            ?LOG_ERROR(#{what => node_id_register_on_remote_node_failed,
+                         remote_node => RemoteNode, reason => Res})
+    end.
+
+register_on_remote_node_rpc(RemoteNode, StartId, RemotePid) ->
+    persistent_term:put({?KEY, StartId}, RemoteNode),
+    gen_server:cast(?MODULE, {register_cleaning_task, StartId, RemotePid}),
+    ok.
diff --git a/src/mongoose_transport.erl b/src/mongoose_transport.erl
index 87f358bf068..b1dc8d04b5d 100644
--- a/src/mongoose_transport.erl
+++ b/src/mongoose_transport.erl
@@ -18,7 +18,6 @@
 %% Types
 %%----------------------------------------------------------------------
 
--type t() :: any().
-type send_xml_input() :: {xmlstreamelement, exml:element()} | jlib:xmlstreamstart() | jlib:xmlstreamend(). @@ -34,7 +33,7 @@ channel => connection_type(), atom() => any()}. --export_type([t/0, send_xml_input/0, peer/0, peername_return/0, peercert_return/0]). +-export_type([socket_data/0, send_xml_input/0, peer/0, peername_return/0, peercert_return/0]). -type socket_module() :: gen_tcp | mongoose_tls. -type socket() :: gen_tcp:socket() | mongoose_tls:socket(). @@ -286,7 +285,8 @@ handle_info({Tag, _TCPSocket, Data}, {ok, TLSData} -> NewState = process_data(TLSData, State), {noreply, NewState, hibernate_or_timeout(NewState)}; - {error, _Reason} -> + {error, Reason} -> + ?LOG_WARNING(#{what => transport_tls_recv_error, socket => Socket, reason => Reason}), {stop, normal, State} end; handle_info({Tag, _Socket}, State) when Tag == tcp_closed; Tag == ssl_closed -> diff --git a/src/mongooseim.app.src b/src/mongooseim.app.src index 03b6ffaa6de..630771af6bc 100644 --- a/src/mongooseim.app.src +++ b/src/mongooseim.app.src @@ -4,8 +4,7 @@ [{description, "MongooseIM"}, {vsn, {cmd, "tools/generate_vsn.sh"}}, {modules, []}, - {registered, [ - ]}, + {registered, []}, {applications, [ asn1, backoff, @@ -28,7 +27,6 @@ idna, kernel, lasse, - mnesia, observer_cli, pa, public_key, @@ -53,6 +51,7 @@ flatlog, segmented_cache ]}, + {included_applications, [mnesia, cets]}, {env, []}, {mod, {ejabberd_app, []}}]}. diff --git a/src/muc/mongoose_muc_online_backend.erl b/src/muc/mongoose_muc_online_backend.erl new file mode 100644 index 00000000000..ebefdbec989 --- /dev/null +++ b/src/muc/mongoose_muc_online_backend.erl @@ -0,0 +1,83 @@ +-module(mongoose_muc_online_backend). + +-export([start/2, + register_room/4, + room_destroyed/4, + find_room_pid/3, + get_online_rooms/2, + node_cleanup/2, + clear_table/1]). + +%% Used in tests +-ignore_xref([clear_table/1]). + +-define(MAIN_MODULE, mongoose_muc_online). 
+ +%% Callbacks + +-callback start(mongooseim:host_type(), gen_mod:module_opts()) -> ok. + +-callback register_room( + HostType :: mongooseim:host_type(), + MucHost :: jid:lserver(), + Room :: mod_muc:room(), + Pid :: pid()) -> ok | {exists, pid()} | {error, term()}. + +-callback room_destroyed(mongooseim:host_type(), jid:lserver(), mod_muc:room(), pid()) -> ok. + +-callback find_room_pid(mongooseim:host_type(), jid:lserver(), mod_muc:room()) -> + {ok, pid()} | {error, not_found}. + +-callback get_online_rooms(mongooseim:host_type(), jid:lserver()) -> + [mod_muc:muc_online_room()]. + +-callback node_cleanup(mongooseim:host_type(), node()) -> ok. + +-callback clear_table(mongooseim:host_type()) -> ok. + +%% API Functions + +-spec start(mongooseim:host_type(), gen_mod:module_opts()) -> any(). +start(HostType, Opts = #{online_backend := Backend}) -> + mongoose_backend:init(HostType, ?MAIN_MODULE, tracked_funs(), #{backend => Backend}), + mongoose_backend:call(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, [HostType, Opts]). + +-spec tracked_funs() -> [atom()]. +tracked_funs() -> + [register_room, + room_destroyed, + get_online_rooms]. + +-spec register_room( + HostType :: mongooseim:host_type(), + MucHost :: jid:lserver(), + Room :: mod_muc:room(), + Pid :: pid()) -> ok | {exists, pid()} | {error, term()}. +register_room(HostType, MucHost, Room, Pid) -> + Args = [HostType, MucHost, Room, Pid], + mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +-spec room_destroyed(mongooseim:host_type(), jid:lserver(), mod_muc:room(), pid()) -> ok. +room_destroyed(HostType, MucHost, Room, Pid) -> + Args = [HostType, MucHost, Room, Pid], + mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +-spec find_room_pid(mongooseim:host_type(), jid:lserver(), mod_muc:room()) -> + {ok, pid()} | {error, not_found}. 
+find_room_pid(HostType, MucHost, Room) -> + Args = [HostType, MucHost, Room], + mongoose_backend:call(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +-spec get_online_rooms(mongooseim:host_type(), jid:lserver()) -> + [mod_muc:muc_online_room()]. +get_online_rooms(HostType, MucHost) -> + Args = [HostType, MucHost], + mongoose_backend:call_tracked(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +-spec node_cleanup(mongooseim:host_type(), node()) -> ok. +node_cleanup(HostType, Node) -> + Args = [HostType, Node], + mongoose_backend:call(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +clear_table(HostType) -> + mongoose_backend:call(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, [HostType]). diff --git a/src/muc/mongoose_muc_online_cets.erl b/src/muc/mongoose_muc_online_cets.erl new file mode 100644 index 00000000000..f0c026f81eb --- /dev/null +++ b/src/muc/mongoose_muc_online_cets.erl @@ -0,0 +1,113 @@ +-module(mongoose_muc_online_cets). +-behaviour(mongoose_muc_online_backend). + +-export([start/2, + register_room/4, + room_destroyed/4, + find_room_pid/3, + get_online_rooms/2, + node_cleanup/2, + clear_table/1]). + +-export([handle_conflict/2]). + +-include_lib("mod_muc.hrl"). + +%% Use MucHost first for prefix select optimization in get_online_rooms +%% We also don't want to send HostType in muc_online_room.host_type between CETS nodes +%% or store it +-type muc_tuple() :: {{MucHost :: jid:lserver(), Room :: mod_muc:room()}, Pid :: pid()}. + +table_name(HostType) -> + binary_to_atom(<<"cets_muc_online_room_", HostType/binary>>). + +-spec start(mongooseim:host_type(), gen_mod:module_opts()) -> ok. +start(HostType, _Opts) -> + Tab = table_name(HostType), + %% Non-random, non-node-specific keys + %% This means that default merging would not work + cets:start(Tab, #{handle_conflict => fun ?MODULE:handle_conflict/2}), + cets_discovery:add_table(mongoose_cets_discovery, Tab), + ok. 
+ +%% We should keep one room and stop another room +%% But stopping logic needs to be tested heavily and designed +%% because we would need to figure out how to send presences to participants +%% (and maybe document how to rejoin the kicked room) +-spec handle_conflict(muc_tuple(), muc_tuple()) -> muc_tuple(). +handle_conflict(Rec1, Rec2) when Rec1 > Rec2 -> + Rec1; +handle_conflict(_Rec1, Rec2) -> + Rec2. + +-spec register_room( + HostType :: mongooseim:host_type(), + MucHost :: jid:lserver(), + Room :: mod_muc:room(), + Pid :: pid()) -> ok | {exists, pid()} | {error, term()}. +register_room(HostType, MucHost, Room, Pid) -> + register_room(HostType, MucHost, Room, Pid, 3). + +register_room(_HostType, _MucHost, _Room, _Pid, 0) -> + {error, failed_to_register}; +register_room(HostType, MucHost, Room, Pid, Retries) -> + Tab = table_name(HostType), + Rec = {{MucHost, Room}, Pid}, + case find_room_pid(HostType, MucHost, Room) of + {ok, OtherPid} -> + {exists, OtherPid}; + {error, not_found} -> + case cets:insert_new(Tab, Rec) of + true -> + ok; + false -> + register_room(HostType, MucHost, Room, Pid, Retries - 1) + end + end. + +%% Race condition is possible between register and room_destroyed +%% (Because register is outside of the room process) +-spec room_destroyed(mongooseim:host_type(), jid:lserver(), mod_muc:room(), pid()) -> ok. +room_destroyed(HostType, MucHost, Room, Pid) -> + Tab = table_name(HostType), + Rec = {{MucHost, Room}, Pid}, + cets:delete_object(Tab, Rec), + ok. + +-spec find_room_pid(mongooseim:host_type(), jid:server(), mod_muc:room()) -> + {ok, pid()} | {error, not_found}. +find_room_pid(HostType, MucHost, Room) -> + Tab = table_name(HostType), + case ets:lookup(Tab, {MucHost, Room}) of + [{_, Pid}] -> + {ok, Pid}; + [] -> + {error, not_found} + end. + +%% This is used by MUC discovery but it is not very scalable. 
+%% This function should look like get_online_rooms(HostType, MucHost, AfterRoomName, Limit) +%% to reduce the load and still have pagination working. +-spec get_online_rooms(mongooseim:host_type(), jid:lserver()) -> + [mod_muc:muc_online_room()]. +get_online_rooms(HostType, MucHost) -> + Tab = table_name(HostType), + [#muc_online_room{name_host = {Room, MucHost}, pid = Pid, host_type = HostType} + || [Room, Pid] <- ets:match(Tab, {{MucHost, '$1'}, '$2'})]. + +-spec node_cleanup(mongooseim:host_type(), node()) -> ok. +node_cleanup(HostType, Node) -> + Tab = table_name(HostType), + Pattern = {'_', '$1'}, + Guard = {'==', {node, '$1'}, Node}, + ets:select_delete(Tab, [{Pattern, [Guard], [true]}]), + ok. + +%% Clear table for tests +-spec clear_table(mongooseim:host_type()) -> ok. +clear_table(HostType) -> + Tab = table_name(HostType), + ets:match_delete(Tab, '_'), + Nodes = cets:other_nodes(Tab), + [rpc:call(Node, ets, match_delete, [Tab, '_']) || Node <- Nodes], + ok. diff --git a/src/muc/mongoose_muc_online_mnesia.erl b/src/muc/mongoose_muc_online_mnesia.erl new file mode 100644 index 00000000000..6699b775744 --- /dev/null +++ b/src/muc/mongoose_muc_online_mnesia.erl @@ -0,0 +1,94 @@ +-module(mongoose_muc_online_mnesia). +-behaviour(mongoose_muc_online_backend). + +-export([start/2, + register_room/4, + room_destroyed/4, + find_room_pid/3, + get_online_rooms/2, + node_cleanup/2, + clear_table/1]). + +-include_lib("mod_muc.hrl"). + +-spec start(mongooseim:host_type(), gen_mod:module_opts()) -> ok. +start(_HostType, _Opts) -> + mnesia:create_table(muc_online_room, + [{ram_copies, [node()]}, + {attributes, record_info(fields, muc_online_room)}]), + mnesia:add_table_copy(muc_online_room, node(), ram_copies), + ok. + +-spec register_room( + HostType :: mongooseim:host_type(), + MucHost :: jid:lserver(), + Room :: mod_muc:room(), + Pid :: pid()) -> ok | {exists, pid()} | {error, term()}. 
+register_room(HostType, MucHost, Room, Pid) -> + F = fun() -> + case mnesia:read(muc_online_room, {Room, MucHost}, write) of + [] -> + mnesia:write(#muc_online_room{name_host = {Room, MucHost}, + host_type = HostType, + pid = Pid}); + [R] -> + {exists, R#muc_online_room.pid} + end + end, + simple_transaction_result(mnesia:transaction(F)). + +%% Race condition is possible between register and room_destroyed +%% (Because register is outside of the room process) +-spec room_destroyed(mongooseim:host_type(), jid:lserver(), mod_muc:room(), pid()) -> ok. +room_destroyed(HostType, MucHost, Room, Pid) -> + Obj = #muc_online_room{name_host = {Room, MucHost}, + host_type = HostType, pid = Pid}, + F = fun() -> mnesia:delete_object(Obj) end, + {atomic, ok} = mnesia:transaction(F), + ok. + +simple_transaction_result({atomic, Res}) -> + Res; +simple_transaction_result({aborted, Reason}) -> + {error, Reason}. + +-spec find_room_pid(mongooseim:host_type(), jid:server(), mod_muc:room()) -> + {ok, pid()} | {error, not_found}. +find_room_pid(_HostType, MucHost, Room) -> + case mnesia:dirty_read(muc_online_room, {Room, MucHost}) of + [R] -> + {ok, R#muc_online_room.pid}; + [] -> + {error, not_found} + end. + +-spec get_online_rooms(mongooseim:host_type(), jid:lserver()) -> + [mod_muc:muc_online_room()]. +get_online_rooms(_HostType, MucHost) -> + mnesia:dirty_select(muc_online_room, + [{#muc_online_room{name_host = '$1', _ = '_'}, + [{'==', {element, 2, '$1'}, MucHost}], + ['$_']}]). + +-spec node_cleanup(mongooseim:host_type(), node()) -> ok. +node_cleanup(HostType, Node) -> + F = fun() -> + Es = mnesia:select( + muc_online_room, + [{#muc_online_room{pid = '$1', + host_type = HostType, + _ = '_'}, + [{'==', {node, '$1'}, Node}], + ['$_']}]), + lists:foreach(fun(E) -> + mnesia:delete_object(E) + end, Es) + end, + mnesia:async_dirty(F), + ok. + +%% Clear table for tests +-spec clear_table(mongooseim:host_type()) -> ok. 
+clear_table(_HostType) -> + mnesia:clear_table(muc_online_room), + ok. diff --git a/src/s2s/mongoose_s2s_backend.erl b/src/s2s/mongoose_s2s_backend.erl new file mode 100644 index 00000000000..20c3982eefe --- /dev/null +++ b/src/s2s/mongoose_s2s_backend.erl @@ -0,0 +1,59 @@ +-module(mongoose_s2s_backend). + +-callback init(map()) -> ok. +-callback get_s2s_out_pids(ejabberd_s2s:fromto()) -> ejabberd_s2s:s2s_pids(). +-callback try_register(Pid :: pid(), + FromTo :: ejabberd_s2s:fromto()) -> boolean(). +-callback remove_connection(FromTo :: ejabberd_s2s:fromto(), Pid :: pid()) -> ok. +-callback node_cleanup(Node :: node()) -> term(). +-callback register_secret(HostType :: mongooseim:host_type(), + Secret :: ejabberd_s2s:base16_secret()) -> ok. +-callback get_shared_secret(mongooseim:host_type()) -> + {ok, ejabberd_s2s:base16_secret()} | {error, not_found}. + +-export([init/1, + get_s2s_out_pids/1, + try_register/2, + remove_connection/2, + node_cleanup/1]). + +-export([register_secret/2, + get_shared_secret/1]). + +-ignore_xref([behaviour_info/1]). + +-define(MAIN_MODULE, mongoose_s2s). + +-spec init(map()) -> ok. +init(Opts) -> + Args = [Opts], + mongoose_backend:init(global, ?MAIN_MODULE, [], Opts), + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + +-spec get_s2s_out_pids(ejabberd_s2s:fromto()) -> ejabberd_s2s:s2s_pids(). +get_s2s_out_pids(FromTo) -> + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [FromTo]). + +%% Register ejabberd_s2s_out connection +-spec try_register(Pid :: pid(), + FromTo :: ejabberd_s2s:fromto()) -> boolean(). +try_register(Pid, FromTo) -> + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [Pid, FromTo]). + +-spec remove_connection(FromTo :: ejabberd_s2s:fromto(), Pid :: pid()) -> ok. +remove_connection(FromTo, Pid) -> + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [FromTo, Pid]). + +-spec node_cleanup(Node :: node()) -> ok. 
+node_cleanup(Node) -> + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [Node]). + +-spec register_secret(HostType :: mongooseim:host_type(), + Secret :: ejabberd_s2s:base16_secret()) -> ok. +register_secret(HostType, Secret) -> + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [HostType, Secret]). + +-spec get_shared_secret(mongooseim:host_type()) -> + {ok, ejabberd_s2s:base16_secret()} | {error, not_found}. +get_shared_secret(HostType) -> + mongoose_backend:call(global, ?MAIN_MODULE, ?FUNCTION_NAME, [HostType]). diff --git a/src/s2s/mongoose_s2s_cets.erl b/src/s2s/mongoose_s2s_cets.erl new file mode 100644 index 00000000000..e97053f1551 --- /dev/null +++ b/src/s2s/mongoose_s2s_cets.erl @@ -0,0 +1,99 @@ +-module(mongoose_s2s_cets). +-behaviour(mongoose_s2s_backend). + +-export([init/1, + get_s2s_out_pids/1, + try_register/2, + remove_connection/2, + node_cleanup/1]). + +-export([register_secret/2, + get_shared_secret/1]). + +%% Internal usage (export so the callback would survive multiple code reloads) +-export([handle_secret_conflict/2]). + +-include("mongoose_logger.hrl"). + +-define(TABLE, cets_s2s_session). +-define(SECRET_TABLE, cets_s2s_secret). + +-type secret_tuple() :: {HostType :: mongooseim:host_type(), TS :: integer(), Secret :: ejabberd_s2s:base16_secret()}. + +-spec init(map()) -> ok. +init(_) -> + cets:start(?TABLE, #{}), + %% Non-random, non-node-specific keys + %% This means that default merging would not work + cets:start(?SECRET_TABLE, #{handle_conflict => fun ?MODULE:handle_secret_conflict/2}), + cets_discovery:add_table(mongoose_cets_discovery, ?TABLE), + cets_discovery:add_table(mongoose_cets_discovery, ?SECRET_TABLE), + ok. + +%% Chooses the most recent value of two. +%% Even if we choose the wrong record - nothing bad would happen +%% (we still need to choose one). 
+%% Choosing the record with the highest timestamp is just a logical behaviour +%% (it also matches the logic of mongoose_s2s_lib:check_shared_secret/2, where updated secret +%% in the config is updated across all nodes in the cluster). +%% Example call: +%% handle_secret_conflict({<<"localhost">>, 1689858975612268, <<"4e48dc4898b23f512059">>}, +%% {<<"localhost">>, 1689859177195451, <<"56fdcd3ec63ff8299eb0">>}). +-spec handle_secret_conflict(secret_tuple(), secret_tuple()) -> secret_tuple(). +handle_secret_conflict(Rec1, Rec2) when Rec1 > Rec2 -> + Rec1; +handle_secret_conflict(_Rec1, Rec2) -> + Rec2. + +%% Pid lists +-spec get_s2s_out_pids(ejabberd_s2s:fromto()) -> ejabberd_s2s:s2s_pids(). +get_s2s_out_pids(FromTo) -> + R = {{FromTo, '$1'}}, + ets:select(?TABLE, [{R, [], ['$1']}]). + +-spec try_register(Pid :: pid(), + FromTo :: ejabberd_s2s:fromto()) -> boolean(). +try_register(Pid, FromTo) -> + Pids = get_s2s_out_pids(FromTo), + case mongoose_s2s_lib:need_more_connections(FromTo, Pids) of + true -> + cets:insert(?TABLE, {{FromTo, Pid}}), + true; + false -> + false + end. + +-spec remove_connection(FromTo :: ejabberd_s2s:fromto(), Pid :: pid()) -> ok. +remove_connection(FromTo, Pid) -> + cets:delete(?TABLE, {FromTo, Pid}), + ok. + +%% node_cleanup is called on each node in the cluster, when Node is down +-spec node_cleanup(Node :: node()) -> ok. +node_cleanup(Node) -> + KeyPattern = {'_', '$1'}, + R = {KeyPattern}, + Guard = {'==', {node, '$1'}, Node}, + ets:select_delete(?TABLE, [{R, [Guard], [true]}]), + ok. + +%% Secrets +-spec register_secret(HostType :: mongooseim:host_type(), + Secret :: ejabberd_s2s:base16_secret()) -> ok. +register_secret(HostType, Secret) -> + %% We store timestamp so we could use it when merging two tables when clustering. + %% Secrets is a very small table and get_shared_secret is called rarely, + %% so having an extra field is not a problem. 
+ TS = erlang:system_time(microsecond), + cets:insert(?SECRET_TABLE, {HostType, TS, Secret}), + ok. + +-spec get_shared_secret(mongooseim:host_type()) -> + {ok, ejabberd_s2s:base16_secret()} | {error, not_found}. +get_shared_secret(HostType) -> + case ets:lookup(?SECRET_TABLE, HostType) of + [{_HostType, _TS, Secret}] -> + {ok, Secret}; + [] -> + {error, not_found} + end. diff --git a/src/s2s/mongoose_s2s_dialback.erl b/src/s2s/mongoose_s2s_dialback.erl new file mode 100644 index 00000000000..ea522a9e730 --- /dev/null +++ b/src/s2s/mongoose_s2s_dialback.erl @@ -0,0 +1,148 @@ +%% Steps for S2S Dialback. +%% Diagram from https://xmpp.org/extensions/xep-0220.html#intro-howitworks +%% +%% Initiating Receiving +%% Server Server +%% ----------- --------- +%% | | +%% | [if necessary, | +%% | perform DNS lookup | +%% | on Target Domain, | +%% | open TCP connection, | +%% | and establish stream] | +%% | -----------------------> | +%% | | Authoritative +%% | send dialback key | Server +%% | -------(STEP 1)--------> | ------------- +%% | | | +%% | | [if necessary, | +%% | | perform DNS lookup, | +%% | | on Sender Domain, | +%% | | open TCP connection, | +%% | | and establish stream] | +%% | | -----------------------> | +%% | | | +%% | | send verify request | +%% | | -------(STEP 2)--------> | +%% | | | +%% | | send verify response | +%% | | <------(STEP 3)--------- | +%% | | +%% | report dialback result | +%% | <-------(STEP 4)-------- | +%% | | + +%% Because db:result and db:verify tags are confusing, use step numbers. +%% (db:result should've been named db:key). + +-module(mongoose_s2s_dialback). +-export([step_1/2, + step_2/3, + step_3/3, + step_4/2]). + +-export([parse_key/1, + parse_validity/1]). + +-export([make_key/3]). + +-xep([{xep, 185}, {version, "1.0"}]). %% Dialback Key Generation and Validation +-xep([{xep, 220}, {version, "1.1.1"}]). %% Server Dialback + +-include("mongoose.hrl"). +-include("jlib.hrl"). 
+ +%% Initiating server sends dialback key +%% https://xmpp.org/extensions/xep-0220.html#example-1 +-spec step_1(ejabberd_s2s:fromto(), ejabberd_s2s:s2s_dialback_key()) -> exml:element(). +step_1(FromTo, Key) -> + #xmlel{name = <<"db:result">>, + attrs = fromto_to_attrs(FromTo), + children = [#xmlcdata{content = Key}]}. + +%% Receiving server sends verification request to authoritative server (step 2) +-spec step_2(ejabberd_s2s:fromto(), ejabberd_s2s:s2s_dialback_key(), ejabberd_s2s:stream_id()) -> exml:element(). +step_2(FromTo, Key, StreamID) -> + #xmlel{name = <<"db:verify">>, + attrs = [{<<"id">>, StreamID} | fromto_to_attrs(FromTo)], + children = [#xmlcdata{content = Key}]}. + +%% Receiving server is informed by authoritative server that key is valid or invalid (step 3) +-spec step_3(ejabberd_s2s:fromto(), ejabberd_s2s:stream_id(), boolean()) -> exml:element(). +step_3(FromTo, StreamID, IsValid) -> + #xmlel{name = <<"db:verify">>, + attrs = [{<<"id">>, StreamID}, + {<<"type">>, is_valid_to_type(IsValid)} + | fromto_to_attrs(FromTo)]}. + +%% Receiving server sends valid or invalid verification result to initiating server (step 4) +-spec step_4(ejabberd_s2s:fromto(), boolean()) -> exml:element(). +step_4(FromTo, IsValid) -> + #xmlel{name = <<"db:result">>, + attrs = [{<<"type">>, is_valid_to_type(IsValid)} + | fromto_to_attrs(FromTo)]}. + +-spec fromto_to_attrs(ejabberd_s2s:fromto()) -> [{binary(), binary()}]. +fromto_to_attrs({LocalServer, RemoteServer}) -> + [{<<"from">>, LocalServer}, {<<"to">>, RemoteServer}]. + +is_valid_to_type(true) -> <<"valid">>; +is_valid_to_type(false) -> <<"invalid">>. + +-spec parse_key(exml:element()) -> false + | {Step :: step_1 | step_2, + FromTo :: ejabberd_s2s:fromto(), + StreamID :: ejabberd_s2s:stream_id(), + Key :: ejabberd_s2s:s2s_dialback_key()}. 
+parse_key(El = #xmlel{name = <<"db:result">>}) -> + %% Initiating Server Sends Dialback Key (Step 1) + parse_key(step_1, El); +parse_key(El = #xmlel{name = <<"db:verify">>}) -> + %% Receiving Server Sends Verification Request to Authoritative Server (Step 2) + parse_key(step_2, El); +parse_key(_) -> + false. + +parse_key(Step, El) -> + FromTo = parse_from_to(El), + StreamID = exml_query:attr(El, <<"id">>, <<>>), + Key = exml_query:cdata(El), + {Step, FromTo, StreamID, Key}. + +%% Parse dialback verification result. +%% Verification result is stored in the `type' attribute and could be `valid' or `invalid'. +-spec parse_validity(exml:element()) -> false + | {Step :: step_3 | step_4, + FromTo :: ejabberd_s2s:fromto(), + StreamID :: ejabberd_s2s:stream_id(), + IsValid :: boolean()}. +parse_validity(El = #xmlel{name = <<"db:verify">>}) -> + %% Receiving Server is Informed by Authoritative Server that Key is Valid or Invalid (Step 3) + parse_validity(step_3, El); +parse_validity(El = #xmlel{name = <<"db:result">>}) -> + %% Receiving Server Sends Valid or Invalid Verification Result to Initiating Server (Step 4) + parse_validity(step_4, El); +parse_validity(_) -> + false. + +parse_validity(Step, El) -> + FromTo = parse_from_to(El), + StreamID = exml_query:attr(El, <<"id">>, <<>>), + IsValid = exml_query:attr(El, <<"type">>) =:= <<"valid">>, + {Step, FromTo, StreamID, IsValid}. + +-spec parse_from_to(exml:element()) -> ejabberd_s2s:fromto(). +parse_from_to(El) -> + RemoteJid = jid:from_binary(exml_query:attr(El, <<"from">>, <<>>)), + LocalJid = jid:from_binary(exml_query:attr(El, <<"to">>, <<>>)), + #jid{luser = <<>>, lresource = <<>>, lserver = LRemoteServer} = RemoteJid, + #jid{luser = <<>>, lresource = <<>>, lserver = LLocalServer} = LocalJid, + %% We use fromto() as seen by ejabberd_s2s_out and ejabberd_s2s + {LLocalServer, LRemoteServer}. 
+ +-spec make_key(ejabberd_s2s:fromto(), ejabberd_s2s:stream_id(), ejabberd_s2s:base16_secret()) -> + ejabberd_s2s:s2s_dialback_key(). +make_key({From, To}, StreamID, Secret) -> + SecretHashed = base16:encode(crypto:hash(sha256, Secret)), + HMac = crypto:mac(hmac, sha256, SecretHashed, [From, " ", To, " ", StreamID]), + base16:encode(HMac). diff --git a/src/s2s/mongoose_s2s_info.erl b/src/s2s/mongoose_s2s_info.erl new file mode 100644 index 00000000000..79a75679cb3 --- /dev/null +++ b/src/s2s/mongoose_s2s_info.erl @@ -0,0 +1,36 @@ +%% Get information about S2S connections on this node. +-module(mongoose_s2s_info). + +%% ejabberd API +-export([get_connections/1]). +-ignore_xref([get_connections/1]). + +-include("mongoose_logger.hrl"). + +-type direction() :: in | out. +-type supervisor_child_spec() :: { undefined, pid(), worker, [module()] }. +-type connection_info() :: ejabberd_s2s_in:connection_info() | ejabberd_s2s_out:connection_info(). + +%% @doc Get information about S2S connections of the specified type. +-spec get_connections(direction()) -> [connection_info()]. +get_connections(Type) -> + Specs = supervisor:which_children(type_to_supervisor(Type)), + [Conn || Spec <- Specs, Conn <- get_state_info(child_to_pid(Spec))]. + +%% Both supervisors are simple_one_for_one with temporary children processes. +-spec type_to_supervisor(direction()) -> atom(). +type_to_supervisor(in) -> ejabberd_s2s_in_sup; +type_to_supervisor(out) -> ejabberd_s2s_out_sup. + +-spec child_to_pid(supervisor_child_spec()) -> pid(). +child_to_pid({_, Pid, _, _}) -> Pid. + +-spec get_state_info(pid()) -> [connection_info()]. +get_state_info(Pid) when is_pid(Pid) -> + case gen_fsm_compat:sync_send_all_state_event(Pid, get_state_info) of + Info when is_map(Info) -> + [Info]; + Other -> + ?LOG_ERROR(#{what => s2s_get_state_info_failed, pid => Pid, reason => Other}), + [] + end. 
diff --git a/src/s2s/mongoose_s2s_lib.erl b/src/s2s/mongoose_s2s_lib.erl new file mode 100644 index 00000000000..e1df46831e9 --- /dev/null +++ b/src/s2s/mongoose_s2s_lib.erl @@ -0,0 +1,217 @@ +%% Library functions without side effects. +%% These functions do not change the state of the system or send any messages. +%% These functions do not write into Mnesia/CETS or read from it. +%% They could read the configuration table though. +%% There is one hook `mongoose_hooks:s2s_allow_host', that could cause some side effects +%% (it depends on the hook handlers). +-module(mongoose_s2s_lib). +-export([make_from_to/2, + timeout/0, + domain_utf8_to_ascii/1, + check_shared_secret/2, + lookup_certfile/1, + choose_pid/2, + need_more_connections/2, + needed_extra_connections_number_if_allowed/2, + allow_host/1, + commands/0]). + +-include("mongoose.hrl"). +-include("jlib.hrl"). +-include("ejabberd_commands.hrl"). + +-type fromto() :: ejabberd_s2s:fromto(). +-type s2s_pids() :: ejabberd_s2s:s2s_pids(). + +-define(DEFAULT_MAX_S2S_CONNECTIONS, 1). +-define(DEFAULT_MAX_S2S_CONNECTIONS_PER_NODE, 1). + +-spec make_from_to(From :: jid:jid(), To :: jid:jid()) -> fromto(). +make_from_to(#jid{lserver = FromServer}, #jid{lserver = ToServer}) -> + {FromServer, ToServer}. + +timeout() -> + 600000. + +%% Converts a UTF-8 domain to ASCII (IDNA) +-spec domain_utf8_to_ascii(jid:server()) -> jid:server() | false. +domain_utf8_to_ascii(Domain) -> + case catch idna:utf8_to_ascii(Domain) of + {'EXIT', _} -> + false; + AsciiDomain -> + list_to_binary(AsciiDomain) + end. + +-spec check_shared_secret(HostType, StoredSecretResult) -> ok | {update, NewSecret} when + HostType :: mongooseim:host_type(), + StoredSecretResult :: {ok, ejabberd_s2s:base16_secret()} | {error, not_found}, + NewSecret :: ejabberd_s2s:base16_secret(). +check_shared_secret(HostType, StoredSecretResult) -> + %% register_secret is replicated across all nodes. 
+ %% So, when starting a node with updated secret in the config, + %% we would replace stored secret on all nodes at once. + %% There could be a small race condition when dialback key checks would get rejected, + %% But there would not be conflicts when some nodes have one secret stored and others - another. + case {StoredSecretResult, get_shared_secret_from_config(HostType)} of + {{error, not_found}, {ok, Secret}} -> + %% Write the secret from the config into Mnesia/CETS for the first time + {update, Secret}; + {{error, not_found}, {error, not_found}} -> + %% Write a random secret into Mnesia/CETS for the first time + {update, make_random_secret()}; + {{ok, Secret}, {ok, Secret}} -> + %% Config matches Mnesia/CETS + ok; + {{ok, _OldSecret}, {ok, NewSecret}} -> + ?LOG_INFO(#{what => overwrite_secret_from_config}), + {update, NewSecret}; + {{ok, _OldSecret}, {error, not_found}} -> + %% Keep the secret already stored in Mnesia/CETS + ok + end. + +-spec make_random_secret() -> ejabberd_s2s:base16_secret(). +make_random_secret() -> + base16:encode(crypto:strong_rand_bytes(10)). + +-spec get_shared_secret_from_config(mongooseim:host_type()) -> + {ok, ejabberd_s2s:base16_secret()} | {error, not_found}. +get_shared_secret_from_config(HostType) -> + mongoose_config:lookup_opt([{s2s, HostType}, shared]). + +-spec lookup_certfile(mongooseim:host_type()) -> {ok, string()} | {error, not_found}. +lookup_certfile(HostType) -> + case mongoose_config:lookup_opt({domain_certfile, HostType}) of + {ok, CertFile} -> + CertFile; + {error, not_found} -> + mongoose_config:lookup_opt([{s2s, HostType}, certfile]) + end. + +%% Prefers the local connection (i.e. not on the remote node) +-spec choose_pid(From :: jid:jid(), Pids :: s2s_pids()) -> pid(). 
+choose_pid(From, [_|_] = Pids) -> + Pids1 = case filter_local_pids(Pids) of + [] -> Pids; + FilteredPids -> FilteredPids + end, + % Use sticky connections based on the JID of the sender + % (without the resource to ensure that a muc room always uses the same connection) + Pid = lists:nth(erlang:phash2(jid:to_bare(From), length(Pids1)) + 1, Pids1), + ?LOG_DEBUG(#{what => s2s_choose_pid, from => From, s2s_pid => Pid}), + Pid. + +%% Returns only pids from the current node. +-spec filter_local_pids(s2s_pids()) -> s2s_pids(). +filter_local_pids(Pids) -> + Node = node(), + [Pid || Pid <- Pids, node(Pid) == Node]. + +-spec max_s2s_connections(fromto()) -> pos_integer(). +max_s2s_connections(FromTo) -> + match_integer_acl_rule(FromTo, max_s2s_connections, + ?DEFAULT_MAX_S2S_CONNECTIONS). + +-spec max_s2s_connections_per_node(fromto()) -> pos_integer(). +max_s2s_connections_per_node(FromTo) -> + match_integer_acl_rule(FromTo, max_s2s_connections_per_node, + ?DEFAULT_MAX_S2S_CONNECTIONS_PER_NODE). + +-spec match_integer_acl_rule(fromto(), atom(), integer()) -> term(). +match_integer_acl_rule({FromServer, ToServer}, Rule, Default) -> + {ok, HostType} = mongoose_domain_api:get_host_type(FromServer), + ToServerJid = jid:make(<<>>, ToServer, <<>>), + case acl:match_rule(HostType, Rule, ToServerJid) of + Int when is_integer(Int) -> Int; + _ -> Default + end. + +-spec needed_extra_connections_number_if_allowed(fromto(), s2s_pids()) -> non_neg_integer(). +needed_extra_connections_number_if_allowed(FromTo, OldCons) -> + case is_s2s_allowed_for_host(FromTo, OldCons) of + true -> + needed_extra_connections_number(FromTo, OldCons); + false -> + 0 + end. + +%% Checks: +%% - if the host is not a service +%% - and host policy (allowlist or denylist) +-spec is_s2s_allowed_for_host(fromto(), _OldConnections :: s2s_pids()) -> boolean(). 
+is_s2s_allowed_for_host(_FromTo, [_|_]) -> + true; %% Has outgoing connections established, skip the check +is_s2s_allowed_for_host(FromTo, []) -> + not is_service(FromTo) andalso allow_host(FromTo). + +%% Checks if the s2s host is not in the denylist or is in the allowlist +%% Runs a hook +-spec allow_host(fromto()) -> boolean(). +allow_host({FromServer, ToServer}) -> + case mongoose_domain_api:get_host_type(FromServer) of + {error, not_found} -> + false; + {ok, HostType} -> + case mongoose_config:lookup_opt([{s2s, HostType}, host_policy, ToServer]) of + {ok, allow} -> + true; + {ok, deny} -> + false; + {error, not_found} -> + mongoose_config:get_opt([{s2s, HostType}, default_policy]) =:= allow + andalso mongoose_hooks:s2s_allow_host(FromServer, ToServer) =:= allow + end + end. + +-spec need_more_connections(fromto(), s2s_pids()) -> boolean(). +need_more_connections(FromTo, Connections) -> + needed_extra_connections_number(FromTo, Connections) > 0. + +-spec needed_extra_connections_number(fromto(), s2s_pids()) -> non_neg_integer(). +needed_extra_connections_number(FromTo, Connections) -> + MaxConnections = max_s2s_connections(FromTo), + MaxConnectionsPerNode = max_s2s_connections_per_node(FromTo), + LocalPids = filter_local_pids(Connections), + lists:min([MaxConnections - length(Connections), + MaxConnectionsPerNode - length(LocalPids)]). + +%% Returns true if the destination must be considered as a service. +-spec is_service(ejabberd_s2s:fromto()) -> boolean(). +is_service({FromServer, ToServer} = _FromTo) -> + case mongoose_config:lookup_opt({route_subdomains, FromServer}) of + {ok, s2s} -> % bypass RFC 3920 10.3 + false; + {error, not_found} -> + Hosts = ?MYHOSTS, + P = fun(ParentDomain) -> lists:member(ParentDomain, Hosts) end, + lists:any(P, parent_domains(ToServer)) + end. + +-spec parent_domains(jid:lserver()) -> [jid:lserver()]. +parent_domains(Domain) -> + parent_domains(Domain, [Domain]). 
+ +parent_domains(<<>>, Acc) -> + lists:reverse(Acc); +parent_domains(<<$., Rest/binary>>, Acc) -> + parent_domains(Rest, [Rest | Acc]); +parent_domains(<<_, Rest/binary>>, Acc) -> + parent_domains(Rest, Acc). + +-spec commands() -> [ejabberd_commands:cmd()]. +commands() -> + [ + #ejabberd_commands{name = incoming_s2s_number, + tags = [stats, s2s], + desc = "Number of incoming s2s connections on the node", + module = stats_api, function = incoming_s2s_number, + args = [], + result = {s2s_incoming, integer}}, + #ejabberd_commands{name = outgoing_s2s_number, + tags = [stats, s2s], + desc = "Number of outgoing s2s connections on the node", + module = stats_api, function = outgoing_s2s_number, + args = [], + result = {s2s_outgoing, integer}} + ]. diff --git a/src/s2s/mongoose_s2s_mnesia.erl b/src/s2s/mongoose_s2s_mnesia.erl new file mode 100644 index 00000000000..e09ec15acc0 --- /dev/null +++ b/src/s2s/mongoose_s2s_mnesia.erl @@ -0,0 +1,119 @@ +-module(mongoose_s2s_mnesia). +-behaviour(mongoose_s2s_backend). + +-export([init/1, + get_s2s_out_pids/1, + try_register/2, + remove_connection/2, + node_cleanup/1]). + +-export([register_secret/2, + get_shared_secret/1]). + +-record(s2s, { + fromto :: ejabberd_s2s:fromto() | '_', + pid :: pid() | '$1' + }). + +-record(s2s_shared, { + host_type :: mongooseim:host_type(), + secret :: ejabberd_s2s:base16_secret() + }). + +-include("mongoose_logger.hrl"). + +-spec init(map()) -> ok. +init(_) -> + init_pids(), + init_secrets(), + ok. + +%% Pid lists +init_pids() -> + Opts = [{ram_copies, [node()]}, {type, bag}, + {attributes, record_info(fields, s2s)}], + mnesia:create_table(s2s, Opts), + mnesia:add_table_copy(s2s, node(), ram_copies). + +-spec get_s2s_out_pids(ejabberd_s2s:fromto()) -> ejabberd_s2s:s2s_pids(). +get_s2s_out_pids(FromTo) -> + s2s_to_pids(mnesia:dirty_read(s2s, FromTo)). + +-spec try_register(Pid :: pid(), + FromTo :: ejabberd_s2s:fromto()) -> boolean(). 
+try_register(Pid, FromTo) -> + F = fun() -> + Pids = get_s2s_out_pids(FromTo), + case mongoose_s2s_lib:need_more_connections(FromTo, Pids) of + true -> + mnesia:write(#s2s{fromto = FromTo, pid = Pid}), + true; + false -> + false + end + end, + case mnesia:transaction(F) of + {atomic, Bool} -> + Bool; + Other -> + ?LOG_ERROR(#{what => s2s_try_register_failed, + s2s_pid => Pid, from_to => FromTo, + reason => Other}), + false + end. + +-spec remove_connection(FromTo :: ejabberd_s2s:fromto(), Pid :: pid()) -> ok. +remove_connection(FromTo, Pid) -> + Rec = #s2s{fromto = FromTo, pid = Pid}, + F = fun() -> + mnesia:delete_object(Rec) + end, + case mnesia:transaction(F) of + {atomic, _} -> + ok; + Other -> + ?LOG_ERROR(#{what => s2s_remove_connection, + from_to => FromTo, reason => Other}), + ok + end. + +-spec node_cleanup(Node :: node()) -> ok. +node_cleanup(Node) -> + F = fun() -> + Es = mnesia:select( + s2s, + [{#s2s{pid = '$1', _ = '_'}, + [{'==', {node, '$1'}, Node}], + ['$_']}]), + lists:foreach(fun(E) -> + mnesia:delete_object(E) + end, Es) + end, + mnesia:async_dirty(F), + ok. + +s2s_to_pids(List) -> + [Pid || #s2s{pid = Pid} <- List]. + +%% Secrets +init_secrets() -> + Opts = [{ram_copies, [node()]}, {attributes, record_info(fields, s2s_shared)}], + mnesia:create_table(s2s_shared, Opts), + mnesia:add_table_copy(s2s_shared, node(), ram_copies). + +-spec register_secret(HostType :: mongooseim:host_type(), + Secret :: ejabberd_s2s:base16_secret()) -> ok. +register_secret(HostType, Secret) -> + Rec = #s2s_shared{host_type = HostType, secret = Secret}, + {atomic, _} = mnesia:transaction(fun() -> mnesia:write(Rec) end), + ok. + +-spec get_shared_secret(mongooseim:host_type()) -> + {ok, ejabberd_s2s:base16_secret()} | {error, not_found}. +get_shared_secret(HostType) -> + case mnesia:dirty_read(s2s_shared, HostType) of + [#s2s_shared{secret = Secret}] -> + {ok, Secret}; + [] -> + {error, not_found} + end. 
diff --git a/src/stream_management/mod_stream_management.erl b/src/stream_management/mod_stream_management.erl index 868f53dbf48..c92a973ddc6 100644 --- a/src/stream_management/mod_stream_management.erl +++ b/src/stream_management/mod_stream_management.erl @@ -79,8 +79,9 @@ start(HostType, Opts) -> ?LOG_INFO(#{what => stream_management_starting}). -spec stop(mongooseim:host_type()) -> ok. -stop(_HostType) -> +stop(HostType) -> ?LOG_INFO(#{what => stream_management_stopping}), + mod_stream_management_backend:stop(HostType), ok. -spec hooks(mongooseim:host_type()) -> gen_hook:hook_list(). diff --git a/src/stream_management/mod_stream_management_backend.erl b/src/stream_management/mod_stream_management_backend.erl index d7c0b95692d..c9f37381278 100644 --- a/src/stream_management/mod_stream_management_backend.erl +++ b/src/stream_management/mod_stream_management_backend.erl @@ -1,5 +1,6 @@ -module(mod_stream_management_backend). -export([init/2, + stop/1, register_smid/3, unregister_smid/2, get_sid/2]). @@ -18,6 +19,9 @@ HostType :: mongooseim:host_type(), Opts :: gen_mod:module_opts(). +-callback stop(HostType) -> ok when + HostType :: mongooseim:host_type(). + -callback register_smid(HostType, SMID, SID) -> ok | {error, term()} when HostType :: mongooseim:host_type(), @@ -58,6 +62,11 @@ init(HostType, Opts) -> Args = [HostType, Opts], mongoose_backend:call(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). +-spec stop(HostType :: mongooseim:host_type()) -> ok. +stop(HostType) -> + Args = [HostType], + mongoose_backend:call(HostType, ?MAIN_MODULE, ?FUNCTION_NAME, Args). + -spec register_smid(HostType, SMID, SID) -> ok | {error, term()} when HostType :: mongooseim:host_type(), diff --git a/src/stream_management/mod_stream_management_cets.erl b/src/stream_management/mod_stream_management_cets.erl new file mode 100644 index 00000000000..058fcd84db5 --- /dev/null +++ b/src/stream_management/mod_stream_management_cets.erl @@ -0,0 +1,110 @@ +-module(mod_stream_management_cets). 
+-behaviour(mod_stream_management_backend). + +-include("mongoose.hrl"). +-include("jlib.hrl"). +-include_lib("stdlib/include/ms_transform.hrl"). + +-export([init/2, + stop/1, + register_smid/3, + unregister_smid/2, + get_sid/2]). + +-export([read_stale_h/2, + write_stale_h/3, + delete_stale_h/2]). + +-export([clear_table/2]). + +-ignore_xref([start_link/1]). + +-define(TABLE, cets_stream_management_session). +-define(TABLE_H, cets_stream_management_stale_h). + +init(HostType, #{stale_h := StaleOpts}) -> + cets:start(?TABLE, #{}), + cets_discovery:add_table(mongoose_cets_discovery, ?TABLE), + maybe_init_stale_h(HostType, StaleOpts), + ok. + +stop(HostType) -> + stop_cleaner(HostType). + +maybe_init_stale_h(HostType, StaleOpts = #{enabled := true}) -> + cets:start(?TABLE_H, #{}), + cets_discovery:add_table(mongoose_cets_discovery, ?TABLE_H), + start_cleaner(HostType, StaleOpts); +maybe_init_stale_h(_, _) -> ok. + +-spec register_smid(HostType, SMID, SID) -> ok when + HostType :: mongooseim:host_type(), + SMID :: mod_stream_management:smid(), + SID :: ejabberd_sm:sid(). +register_smid(_HostType, SMID, SID) -> + cets:insert_many(?TABLE, [{{sid, SID}, SMID}, {{smid, SMID}, SID}]). + +-spec unregister_smid(mongooseim:host_type(), ejabberd_sm:sid()) -> + {ok, SMID :: mod_stream_management:smid()} | {error, smid_not_found}. +unregister_smid(_HostType, SID) -> + case ets:lookup(?TABLE, {sid, SID}) of + [] -> + {error, smid_not_found}; + [{_, SMID}] -> + cets:delete_many(?TABLE, [{sid, SID}, {smid, SMID}]), + {ok, SMID} + end. + +-spec get_sid(mongooseim:host_type(), mod_stream_management:smid()) -> + {sid, ejabberd_sm:sid()} | {error, smid_not_found}. +get_sid(_HostType, SMID) -> + case ets:lookup(?TABLE, {smid, SMID}) of + [] -> + {error, smid_not_found}; + [{_, SID}] -> + {sid, SID} + end. 
+ +%% stale_h functions + +-spec read_stale_h(HostType, SMID) -> + {stale_h, non_neg_integer()} | {error, smid_not_found} when + HostType :: mongooseim:host_type(), + SMID :: mod_stream_management:smid(). +read_stale_h(_HostType, SMID) -> + case ets:lookup(?TABLE_H, SMID) of + [] -> + {error, smid_not_found}; + [{_, H, _}] -> + {stale_h, H} + end. + +-spec write_stale_h(HostType, SMID, H) -> ok when + HostType :: mongooseim:host_type(), + SMID :: mod_stream_management:smid(), + H :: non_neg_integer(). +write_stale_h(_HostType, SMID, H) -> + Stamp = erlang:monotonic_time(second), + cets:insert(?TABLE_H, {SMID, H, Stamp}). + +-spec delete_stale_h(HostType, SMID) -> ok when + HostType :: mongooseim:host_type(), + SMID :: mod_stream_management:smid(). +delete_stale_h(_HostType, SMID) -> + cets:delete(?TABLE_H, SMID). + +%% stale_h cleaning logic + +start_cleaner(HostType, #{repeat_after := Interval, geriatric := TTL}) -> + %% TODO cleaner should be a service + WOpts = #{host_type => HostType, action => fun ?MODULE:clear_table/2, + opts => TTL, interval => timer:seconds(Interval)}, + mongoose_collector:start_common(?MODULE, HostType, WOpts). + +stop_cleaner(HostType) -> + mongoose_collector:stop_common(?MODULE, HostType). + +clear_table(_HostType, GeriatricAge) -> + TimeToDie = erlang:monotonic_time(second) - GeriatricAge, + MS = ets:fun2ms(fun({_, _, S}) when S < TimeToDie -> true end), + ets:select_delete(?TABLE_H, MS). diff --git a/src/stream_management/mod_stream_management_mnesia.erl b/src/stream_management/mod_stream_management_mnesia.erl index d441c360afc..c455fd01a67 100644 --- a/src/stream_management/mod_stream_management_mnesia.erl +++ b/src/stream_management/mod_stream_management_mnesia.erl @@ -5,6 +5,7 @@ -include_lib("stdlib/include/ms_transform.hrl"). -export([init/2, + stop/1, register_smid/3, unregister_smid/2, get_sid/2]). @@ -33,6 +34,9 @@ init(HostType, #{stale_h := StaleOpts}) -> maybe_init_stale_h(HostType, StaleOpts), ok. 
+stop(HostType) -> + stop_cleaner(HostType). + maybe_init_stale_h(HostType, StaleOpts = #{enabled := true}) -> ?LOG_INFO(#{what => stream_mgmt_stale_h_start}), mnesia:create_table(stream_mgmt_stale_h, @@ -116,13 +120,13 @@ delete_stale_h(_HostType, SMID) -> %% stale_h cleaning logic start_cleaner(HostType, #{repeat_after := Interval, geriatric := TTL}) -> - Name = gen_mod:get_module_proc(HostType, stream_management_stale_h), + %% TODO cleaner should be a service WOpts = #{host_type => HostType, action => fun ?MODULE:clear_table/2, opts => TTL, interval => timer:seconds(Interval)}, - MFA = {mongoose_collector, start_link, [Name, WOpts]}, - ChildSpec = {Name, MFA, permanent, 5000, worker, [?MODULE]}, - %% TODO cleaner should be a service - ejabberd_sup:start_child(ChildSpec). + mongoose_collector:start_common(?MODULE, HostType, WOpts). + +stop_cleaner(HostType) -> + mongoose_collector:stop_common(?MODULE, HostType). clear_table(_HostType, GeriatricAge) -> TimeToDie = erlang:monotonic_time(second) - GeriatricAge, diff --git a/src/system_metrics/mongoose_system_metrics_collector.erl b/src/system_metrics/mongoose_system_metrics_collector.erl index e658d7aaea0..0432a5d6de0 100644 --- a/src/system_metrics/mongoose_system_metrics_collector.erl +++ b/src/system_metrics/mongoose_system_metrics_collector.erl @@ -129,8 +129,8 @@ get_version() -> end. get_components() -> - Domains = mongoose_router:get_all_domains() ++ ejabberd_router:dirty_get_all_components(all), - Components = [ejabberd_router:lookup_component(D, node()) || D <- Domains], + Domains = mongoose_router:get_all_domains() ++ mongoose_component:dirty_get_all_components(all), + Components = [mongoose_component:lookup_component(D, node()) || D <- Domains], LenComponents = length(lists:flatten(Components)), #{component => LenComponents}. 
diff --git a/src/system_metrics/mongoose_system_metrics_sender.erl b/src/system_metrics/mongoose_system_metrics_sender.erl index 41f758310b2..694a4433bd3 100644 --- a/src/system_metrics/mongoose_system_metrics_sender.erl +++ b/src/system_metrics/mongoose_system_metrics_sender.erl @@ -4,8 +4,6 @@ -export([send/3]). --type google_analytics_report() :: string(). --type url() :: string(). -type report_struct() :: mongoose_system_metrics_collector:report_struct(). -spec send(service_mongoose_system_metrics:client_id(), diff --git a/test/common/config_parser_helper.erl b/test/common/config_parser_helper.erl index 2ebd433f61c..7dc6e831e8e 100644 --- a/test/common/config_parser_helper.erl +++ b/test/common/config_parser_helper.erl @@ -14,6 +14,7 @@ options("host_types") -> [<<"this is host type">>, <<"some host type">>, <<"another host type">>, <<"yet another host type">>]}, {hosts, [<<"localhost">>]}, + {internal_databases, #{}}, {language, <<"en">>}, {listen, []}, {loglevel, warning}, @@ -26,6 +27,8 @@ options("host_types") -> #{event_cleaning_interval => 1000, event_max_age => 5000})}}, {sm_backend, mnesia}, + {component_backend, mnesia}, + {s2s_backend, mnesia}, {{s2s, <<"another host type">>}, default_s2s()}, {{s2s, <<"localhost">>}, default_s2s()}, {{s2s, <<"some host type">>}, default_s2s()}, @@ -62,6 +65,10 @@ options("miscellaneous") -> {hide_service_name, true}, {host_types, []}, {hosts, [<<"localhost">>, <<"anonymous.localhost">>]}, + {internal_databases, + #{cets => + #{backend => rdbms, cluster_name => mongooseim}, + mnesia => #{}}}, {language, <<"en">>}, {listen, [config([listen, http], @@ -92,6 +99,8 @@ options("miscellaneous") -> {{s2s, <<"anonymous.localhost">>}, default_s2s()}, {{s2s, <<"localhost">>}, default_s2s()}, {sm_backend, mnesia}, + {component_backend, mnesia}, + {s2s_backend, mnesia}, {{auth, <<"anonymous.localhost">>}, custom_auth()}, {{auth, <<"localhost">>}, custom_auth()}, {{modules, <<"anonymous.localhost">>}, #{}}, @@ -106,6 +115,7 @@ 
options("modules") -> {hide_service_name, false}, {host_types, []}, {hosts, [<<"localhost">>, <<"dummy_host">>]}, + {internal_databases, #{mnesia => #{}}}, {language, <<"en">>}, {listen, []}, {loglevel, warning}, @@ -118,6 +128,8 @@ options("modules") -> {{s2s, <<"dummy_host">>}, default_s2s()}, {{s2s, <<"localhost">>}, default_s2s()}, {sm_backend, mnesia}, + {component_backend, mnesia}, + {s2s_backend, mnesia}, {{auth, <<"dummy_host">>}, default_auth()}, {{auth, <<"localhost">>}, default_auth()}, {{modules, <<"dummy_host">>}, all_modules()}, @@ -131,6 +143,10 @@ options("mongooseim-pgsql") -> {host_types, []}, {hosts, [<<"localhost">>, <<"anonymous.localhost">>, <<"localhost.bis">>]}, + {internal_databases, + #{cets => + #{backend => rdbms, cluster_name => mongooseim}, + mnesia => #{}}}, {language, <<"en">>}, {listen, [config([listen, c2s], @@ -251,6 +267,8 @@ options("mongooseim-pgsql") -> #{initial_report => 300000, periodic_report => 10800000}}}, {sm_backend, mnesia}, + {component_backend, mnesia}, + {s2s_backend, mnesia}, {{auth, <<"anonymous.localhost">>}, (default_auth())#{anonymous => #{allow_multiple_connections => true, protocol => both}, @@ -299,6 +317,7 @@ options("outgoing_pools") -> {host_types, []}, {hosts, [<<"localhost">>, <<"anonymous.localhost">>, <<"localhost.bis">>]}, + {internal_databases, #{}}, {language, <<"en">>}, {listen, []}, {loglevel, warning}, @@ -345,6 +364,8 @@ options("outgoing_pools") -> {{s2s, <<"localhost">>}, default_s2s()}, {{s2s, <<"localhost.bis">>}, default_s2s()}, {sm_backend, mnesia}, + {component_backend, mnesia}, + {s2s_backend, mnesia}, {{auth, <<"anonymous.localhost">>}, default_auth()}, {{auth, <<"localhost">>}, default_auth()}, {{auth, <<"localhost.bis">>}, default_auth()}, @@ -360,6 +381,7 @@ options("s2s_only") -> {hide_service_name, false}, {host_types, []}, {hosts, [<<"localhost">>, <<"dummy_host">>]}, + {internal_databases, #{}}, {language, <<"en">>}, {listen, []}, {loglevel, warning}, @@ -370,6 +392,8 @@ 
options("s2s_only") -> {routing_modules, mongoose_router:default_routing_modules()}, {services, #{}}, {sm_backend, mnesia}, + {component_backend, mnesia}, + {s2s_backend, mnesia}, {{auth, <<"dummy_host">>}, default_auth()}, {{auth, <<"localhost">>}, default_auth()}, {{modules, <<"dummy_host">>}, #{}}, @@ -860,7 +884,7 @@ default_mod_config(mod_inbox) -> max_result_limit => infinity}; default_mod_config(mod_jingle_sip) -> #{proxy_host => "localhost", proxy_port => 5060, listen_port => 5600, local_host => "localhost", - sdp_origin => "127.0.0.1", transport => "udp", username_to_phone => []}; + sdp_origin => "127.0.0.1", transport => "udp", username_to_phone => [], backend => mnesia}; default_mod_config(mod_keystore) -> #{ram_key_size => 2048, keys => #{}}; default_mod_config(mod_last) -> @@ -882,6 +906,7 @@ default_mod_config(mod_mam_muc_rdbms_arch) -> db_jid_format => mam_jid_rfc}; default_mod_config(mod_muc) -> #{backend => mnesia, + online_backend => mnesia, host => {prefix,<<"conference.">>}, access => all, access_create => all, diff --git a/test/component_reg_SUITE.erl b/test/component_reg_SUITE.erl index 0f045c14fb6..6fb0d6b4914 100644 --- a/test/component_reg_SUITE.erl +++ b/test/component_reg_SUITE.erl @@ -36,15 +36,16 @@ end_per_suite(_C) -> opts() -> [{all_metrics_are_global, false}, + {component_backend, mnesia}, {routing_modules, [xmpp_router_a, xmpp_router_b, xmpp_router_c]}]. 
registering(_C) -> Dom = <<"aaa.bbb.com">>, - ejabberd_router:register_component(Dom, mongoose_packet_handler:new(?MODULE)), - Lookup = ejabberd_router:lookup_component(Dom), + {ok, Comps} = mongoose_component:register_components([Dom], node(), mongoose_packet_handler:new(?MODULE), false), + Lookup = mongoose_component:lookup_component(Dom), ?assertMatch([#external_component{}], Lookup), - ejabberd_router:unregister_component(Dom), - ?assertMatch([], ejabberd_router:lookup_component(Dom)), + mongoose_component:unregister_components(Comps), + ?assertMatch([], mongoose_component:lookup_component(Dom)), ok. registering_with_local(_C) -> @@ -53,37 +54,37 @@ registering_with_local(_C) -> ThisNode = node(), AnotherNode = 'another@nohost', Handler = mongoose_packet_handler:new(?MODULE), %% This handler is only for testing! - ejabberd_router:register_component(Dom, Handler), + {ok, Comps} = mongoose_component:register_components([Dom], node(), Handler, false), %% we can find it globally - ?assertMatch([#external_component{node = ThisNode}], ejabberd_router:lookup_component(Dom)), + ?assertMatch([#external_component{node = ThisNode}], mongoose_component:lookup_component(Dom)), %% and for this node ?assertMatch([#external_component{node = ThisNode}], - ejabberd_router:lookup_component(Dom, ThisNode)), + mongoose_component:lookup_component(Dom, ThisNode)), %% but not for another node - ?assertMatch([], ejabberd_router:lookup_component(Dom, AnotherNode)), + ?assertMatch([], mongoose_component:lookup_component(Dom, AnotherNode)), %% once we unregister it is not available - ejabberd_router:unregister_component(Dom), - ?assertMatch([], ejabberd_router:lookup_component(Dom)), - ?assertMatch([], ejabberd_router:lookup_component(Dom, ThisNode)), - ?assertMatch([], ejabberd_router:lookup_component(Dom, AnotherNode)), + mongoose_component:unregister_components(Comps), + ?assertMatch([], mongoose_component:lookup_component(Dom)), + ?assertMatch([], 
mongoose_component:lookup_component(Dom, ThisNode)), + ?assertMatch([], mongoose_component:lookup_component(Dom, AnotherNode)), %% we can register from both nodes - ejabberd_router:register_component(Dom, ThisNode, Handler), + {ok, Comps2} = mongoose_component:register_components([Dom], ThisNode, Handler, false), %% passing node here is only for testing - ejabberd_router:register_component(Dom, AnotherNode, Handler), + {ok, _Comps3} = mongoose_component:register_components([Dom], AnotherNode, Handler, false), %% both are reachable locally ?assertMatch([#external_component{node = ThisNode}], - ejabberd_router:lookup_component(Dom, ThisNode)), + mongoose_component:lookup_component(Dom, ThisNode)), ?assertMatch([#external_component{node = AnotherNode}], - ejabberd_router:lookup_component(Dom, AnotherNode)), + mongoose_component:lookup_component(Dom, AnotherNode)), %% if we try global lookup we get two handlers - ?assertMatch([_, _], ejabberd_router:lookup_component(Dom)), + ?assertMatch([_, _], mongoose_component:lookup_component(Dom)), %% we unregister one and the result is: - ejabberd_router:unregister_component(Dom), - ?assertMatch([], ejabberd_router:lookup_component(Dom, ThisNode)), + mongoose_component:unregister_components(Comps2), + ?assertMatch([], mongoose_component:lookup_component(Dom, ThisNode)), ?assertMatch([#external_component{node = AnotherNode}], - ejabberd_router:lookup_component(Dom)), + mongoose_component:lookup_component(Dom)), ?assertMatch([#external_component{node = AnotherNode}], - ejabberd_router:lookup_component(Dom, AnotherNode)), + mongoose_component:lookup_component(Dom, AnotherNode)), ok. 
process_packet(_From, _To, _Packet, _Extra) -> diff --git a/test/config_parser_SUITE.erl b/test/config_parser_SUITE.erl index abe163ce3fb..fbafcbbd9d6 100644 --- a/test/config_parser_SUITE.erl +++ b/test/config_parser_SUITE.erl @@ -52,6 +52,7 @@ all() -> {group, listen}, {group, auth}, {group, pool}, + {group, internal_databases}, {group, shaper_acl_access}, {group, s2s}, {group, modules}, @@ -74,6 +75,8 @@ groups() -> language, all_metrics_are_global, sm_backend, + component_backend, + s2s_backend, max_fsm_queue, http_server_name, rdbms_server_type, @@ -143,6 +146,7 @@ groups() -> pool_rabbit_connection, pool_ldap, pool_ldap_connection]}, + {internal_databases, [parallel], [internal_database_cets]}, {shaper_acl_access, [parallel], [shaper, acl, acl_merge_host_and_global, @@ -378,9 +382,22 @@ all_metrics_are_global(_Config) -> sm_backend(_Config) -> ?cfg(sm_backend, mnesia, #{}), % default ?cfg(sm_backend, mnesia, #{<<"general">> => #{<<"sm_backend">> => <<"mnesia">>}}), + ?cfg(sm_backend, cets, #{<<"general">> => #{<<"sm_backend">> => <<"cets">>}}), ?cfg(sm_backend, redis, #{<<"general">> => #{<<"sm_backend">> => <<"redis">>}}), ?err(#{<<"general">> => #{<<"sm_backend">> => <<"amnesia">>}}). +component_backend(_Config) -> + ?cfg(component_backend, mnesia, #{}), % default + ?cfg(component_backend, mnesia, #{<<"general">> => #{<<"component_backend">> => <<"mnesia">>}}), + ?cfg(component_backend, cets, #{<<"general">> => #{<<"component_backend">> => <<"cets">>}}), + ?err(#{<<"general">> => #{<<"component_backend">> => <<"amnesia">>}}). + +s2s_backend(_Config) -> + ?cfg(s2s_backend, mnesia, #{}), % default + ?cfg(s2s_backend, mnesia, #{<<"general">> => #{<<"s2s_backend">> => <<"mnesia">>}}), + ?err(#{<<"general">> => #{<<"s2s_backend">> => <<"redis">>}}), + ?err(#{<<"general">> => #{<<"s2s_backend">> => <<"amnesia">>}}). 
+ max_fsm_queue(_Config) -> ?cfg(max_fsm_queue, 100, #{<<"general">> => #{<<"max_fsm_queue">> => 100}}), ?err(#{<<"general">> => #{<<"max_fsm_queue">> => -10}}). @@ -1159,6 +1176,30 @@ test_fast_tls_server(P, T) -> ?err(T(#{<<"versions">> => [<<"tlsv1.2">>]})), % option only for just_tls ?err(T(#{<<"protocol_options">> => [<<>>]})). +%% tests: internal_databases + +internal_database_cets(_Config) -> + CetsEnabled = #{<<"internal_databases">> => #{<<"cets">> => #{}}}, + CetsFile = #{<<"internal_databases">> => #{<<"cets">> => + #{<<"backend">> => <<"file">>, <<"node_list_file">> => <<"/dev/null">>}}}, + %% No internal_databases section means an empty list of databases + ?cfg([internal_databases], #{}, #{}), % default + %% Empty internal_databases could be configured explicitly + ?cfg([internal_databases], #{}, #{<<"internal_databases">> => #{}}), + + ?cfg([internal_databases, cets, backend], file, + #{<<"internal_databases">> => #{<<"cets">> => #{<<"backend">> => <<"file">>}}}), + ?cfg([internal_databases, cets, backend], rdbms, + #{<<"internal_databases">> => #{<<"cets">> => #{<<"cluster_name">> => <<"test">>}}}), + + ?cfg([internal_databases, cets, cluster_name], mongooseim, CetsEnabled), + ?cfg([internal_databases, cets, node_list_file], "/dev/null", CetsFile), + %% If only mnesia section is defined, CETS section is not included + ?cfg([internal_databases], #{mnesia => #{}}, + #{<<"internal_databases">> => #{<<"mnesia">> => #{}}}), + ?err(#{<<"internal_databases">> => #{<<"cets">> => #{<<"backend">> => <<"mnesia">>}}}), + ?err(#{<<"internal_databases">> => #{<<"cets">> => #{<<"cluster_name">> => 123}}}). 
+ %% tests: shaper, acl, access shaper(_Config) -> ?cfg([shaper, normal], #{max_rate => 1000}, @@ -1872,6 +1913,7 @@ mod_jingle_sip(_Config) -> check_module_defaults(mod_jingle_sip), T = fun(Opts) -> #{<<"modules">> => #{<<"mod_jingle_sip">> => Opts}} end, P = [modules, mod_jingle_sip], + ?cfgh(P ++ [backend], mnesia, T(#{<<"backend">> => <<"mnesia">>})), ?cfgh(P ++ [proxy_host], "proxxxy.com", T(#{<<"proxy_host">> => <<"proxxxy.com">>})), ?cfgh(P ++ [proxy_port], 5601, @@ -1887,6 +1929,7 @@ mod_jingle_sip(_Config) -> ?cfgh(P ++ [username_to_phone], [{<<"2000006168">>, <<"+919177074440">>}], T(#{<<"username_to_phone">> => [#{<<"username">> => <<"2000006168">>, <<"phone">> => <<"+919177074440">>}]})), + ?errh(T(#{<<"backend">> => <<"amnesia">>})), ?errh(T(#{<<"proxy_host">> => 1})), ?errh(T(#{<<"proxy_port">> => 1000000})), ?errh(T(#{<<"listen_port">> => -1})), diff --git a/test/config_parser_SUITE_data/miscellaneous.toml b/test/config_parser_SUITE_data/miscellaneous.toml index e7e0b6f2553..abd5fe3ecfd 100644 --- a/test/config_parser_SUITE_data/miscellaneous.toml +++ b/test/config_parser_SUITE_data/miscellaneous.toml @@ -79,3 +79,7 @@ periodic_report = 300_000 tracking_id.id = "G-12345678" tracking_id.secret = "Secret" + +[internal_databases] + [internal_databases.mnesia] + [internal_databases.cets] diff --git a/test/config_parser_SUITE_data/modules.toml b/test/config_parser_SUITE_data/modules.toml index 4f98ed80880..96f85d4d800 100644 --- a/test/config_parser_SUITE_data/modules.toml +++ b/test/config_parser_SUITE_data/modules.toml @@ -5,6 +5,9 @@ ] default_server_domain = "localhost" +[internal_databases] + [internal_databases.mnesia] + [modules.mod_adhoc] iqdisc.type = "one_queue" report_commands_node = true diff --git a/test/config_parser_SUITE_data/mongooseim-pgsql.toml b/test/config_parser_SUITE_data/mongooseim-pgsql.toml index ff43dc17e02..327087cc16d 100644 --- a/test/config_parser_SUITE_data/mongooseim-pgsql.toml +++ 
b/test/config_parser_SUITE_data/mongooseim-pgsql.toml @@ -12,6 +12,10 @@ sm_backend = "mnesia" max_fsm_queue = 1000 +[internal_databases] + [internal_databases.mnesia] + [internal_databases.cets] + [[listen.http]] port = 5280 transport.num_acceptors = 10 diff --git a/test/ejabberd_sm_SUITE.erl b/test/ejabberd_sm_SUITE.erl index c24d473948c..db9e66dac3a 100644 --- a/test/ejabberd_sm_SUITE.erl +++ b/test/ejabberd_sm_SUITE.erl @@ -13,7 +13,7 @@ -import(config_parser_helper, [default_config/1]). -all() -> [{group, mnesia}, {group, redis}]. +all() -> [{group, mnesia}, {group, redis}, {group, cets}]. init_per_suite(C) -> {ok, _} = application:ensure_all_started(jid), @@ -33,7 +33,8 @@ end_per_suite(C) -> groups() -> [{mnesia, [], tests()}, - {redis, [], tests()}]. + {redis, [], tests()}, + {cets, [], tests()}]. tests() -> [open_session, @@ -64,7 +65,11 @@ init_per_group(mnesia, Config) -> ok = mnesia:start(), [{backend, ejabberd_sm_mnesia} | Config]; init_per_group(redis, Config) -> - init_redis_group(is_redis_running(), Config). + init_redis_group(is_redis_running(), Config); +init_per_group(cets, Config) -> + DiscoOpts = #{name => mongoose_cets_discovery, disco_file => "does_not_exist.txt"}, + {ok, Pid} = cets_discovery:start(DiscoOpts), + [{backend, ejabberd_sm_cets}, {cets_disco_pid, Pid} | Config]. init_redis_group(true, Config) -> Self = self(), @@ -86,7 +91,10 @@ end_per_group(mnesia, Config) -> mnesia:stop(), mnesia:delete_schema([node()]), Config; -end_per_group(_, Config) -> +end_per_group(cets, Config) -> + exit(proplists:get_value(cets_disco_pid, Config), kill), + Config; +end_per_group(redis, Config) -> whereis(test_helper) ! stop, Config. 
@@ -447,6 +455,8 @@ get_fun_for_unique_count(ejabberd_sm_mnesia) -> fun() -> mnesia:abort({badarg,[session,{{1442,941593,580189},list_to_pid("<0.23291.6>")}]}) end; +get_fun_for_unique_count(ejabberd_sm_cets) -> + fun() -> error(oops) end; get_fun_for_unique_count(ejabberd_sm_redis) -> fun() -> %% The code below is on purpose, it's to crash with badarg reason @@ -493,6 +503,8 @@ verify_session_opened(C, Sid, USR) -> do_verify_session_opened(ejabberd_sm_mnesia, Sid, {U, S, R} = USR) -> general_session_check(ejabberd_sm_mnesia, Sid, USR, U, S, R); +do_verify_session_opened(ejabberd_sm_cets, Sid, {U, S, R} = USR) -> + general_session_check(ejabberd_sm_cets, Sid, USR, U, S, R); do_verify_session_opened(ejabberd_sm_redis, Sid, {U, S, R} = USR) -> UHash = iolist_to_binary(hash(U, S, R, Sid)), Hashes = mongoose_redis:cmd(["SMEMBERS", n(node())]), @@ -521,7 +533,9 @@ clean_sessions(C) -> ejabberd_sm_mnesia -> mnesia:clear_table(session); ejabberd_sm_redis -> - mongoose_redis:cmd(["FLUSHALL"]) + mongoose_redis:cmd(["FLUSHALL"]); + ejabberd_sm_cets -> + ets:delete_all_objects(cets_session) end. generate_random_user(S) -> @@ -603,6 +617,8 @@ setup_sm(Config) -> ejabberd_sm_redis -> mongoose_redis:cmd(["FLUSHALL"]); ejabberd_sm_mnesia -> + ok; + ejabberd_sm_cets -> ok end. @@ -622,7 +638,8 @@ opts(Config) -> {sm_backend, sm_backend(?config(backend, Config))}]. sm_backend(ejabberd_sm_redis) -> redis; -sm_backend(ejabberd_sm_mnesia) -> mnesia. +sm_backend(ejabberd_sm_mnesia) -> mnesia; +sm_backend(ejabberd_sm_cets) -> cets. set_meck() -> meck:expect(gen_hook, add_handler, fun(_, _, _, _, _) -> ok end), diff --git a/test/mongoose_cleanup_SUITE.erl b/test/mongoose_cleanup_SUITE.erl index 5a5f28653f3..a87edc0fc5e 100644 --- a/test/mongoose_cleanup_SUITE.erl +++ b/test/mongoose_cleanup_SUITE.erl @@ -3,15 +3,23 @@ -include_lib("eunit/include/eunit.hrl"). -include("mongoose.hrl"). 
--export([all/0, +-export([all/0, groups/0, init_per_suite/1, end_per_suite/1, + init_per_group/2, end_per_group/2, init_per_testcase/2, end_per_testcase/2]). --export([cleaner_runs_hook_on_nodedown/1, notify_self_hook/3]). +-export([cleaner_runs_hook_on_nodedown/1, + cleaner_runs_hook_on_nodedown_for_host_type/1, + notify_self_hook/3, + notify_self_hook_for_host_type/3]). -export([auth_anonymous/1, last/1, stream_management/1, s2s/1, - bosh/1 + bosh/1, + component/1, + component_from_other_node_remains/1, + muc_room/1, + muc_room_from_other_node_remains/1 ]). -define(HOST, <<"localhost">>). @@ -24,13 +32,27 @@ all() -> [ cleaner_runs_hook_on_nodedown, + cleaner_runs_hook_on_nodedown_for_host_type, auth_anonymous, last, stream_management, s2s, - bosh + bosh, + [{group, Group} || {Group, _, _} <- groups()] ]. +groups() -> + [{component_cets, [], component_cases()}, + {component_mnesia, [], component_cases()}, + {muc_cets, [], muc_cases()}, + {muc_mnesia, [], muc_cases()}]. + +component_cases() -> + [component, component_from_other_node_remains]. + +muc_cases() -> + [muc_room, muc_room_from_other_node_remains]. + init_per_suite(Config) -> {ok, _} = application:ensure_all_started(jid), ok = mnesia:create_schema([node()]), @@ -44,25 +66,65 @@ end_per_suite(Config) -> mnesia:delete_schema([node()]), Config. +init_per_group(component_mnesia, Config) -> + mongoose_config:set_opt(component_backend, mnesia), + Config; +init_per_group(component_cets, Config) -> + mongoose_config:set_opt(component_backend, cets), + start_cets_disco(Config); +init_per_group(muc_mnesia, Config) -> + [{muc_backend, mnesia} | Config]; +init_per_group(muc_cets, Config) -> + [{muc_backend, cets} | start_cets_disco(Config)]. + +end_per_group(_Group, Config) -> + stop_cets_disco(Config). 
+ init_per_testcase(TestCase, Config) -> + mim_ct_sup:start_link(ejabberd_sup), {ok, _HooksServer} = gen_hook:start_link(), {ok, _DomainSup} = mongoose_domain_sup:start_link(), setup_meck(meck_mods(TestCase)), + start_component(TestCase), + start_muc_backend(Config), Config. end_per_testcase(TestCase, _Config) -> + stop_component(TestCase), mongoose_modules:stop(), mongoose_config:set_opt({modules, ?HOST}, #{}), unload_meck(meck_mods(TestCase)). +start_component(TestCase) -> + case needs_component(TestCase) of + true -> + mongoose_router:start(), + mongoose_component:start(); + false -> + ok + end. + +stop_component(TestCase) -> + case needs_component(TestCase) of + true -> + mongoose_component:stop(); + false -> + ok + end. + +needs_component(TestCase) -> + lists:member(TestCase, component_cases()). + opts() -> [{hosts, [?HOST]}, {host_types, []}, {all_metrics_are_global, false}, + {s2s_backend, mnesia}, {{modules, ?HOST}, #{}}]. meck_mods(bosh) -> [exometer, mod_bosh_socket]; meck_mods(s2s) -> [exometer, ejabberd_commands, mongoose_bin]; +meck_mods(component) -> [exometer]; meck_mods(_) -> [exometer, ejabberd_sm, ejabberd_local]. %% ----------------------------------------------------- @@ -75,10 +137,8 @@ cleaner_runs_hook_on_nodedown(_Config) -> gen_hook:add_handler(node_cleanup, global, fun ?MODULE:notify_self_hook/3, #{self => self()}, 50), - FakeNode = fakename@fakehost, Cleaner ! {nodedown, FakeNode}, - receive {got_nodedown, FakeNode} -> ok after timer:seconds(1) -> @@ -87,10 +147,28 @@ cleaner_runs_hook_on_nodedown(_Config) -> ?assertEqual(false, meck:called(gen_hook, error_running_hook, ['_', '_', '_', '_', '_'])). +cleaner_runs_hook_on_nodedown_for_host_type(_Config) -> + HostType = ?HOST, + {ok, Cleaner} = mongoose_cleaner:start_link(), + gen_hook:add_handler(node_cleanup_for_host_type, HostType, + fun ?MODULE:notify_self_hook_for_host_type/3, + #{self => self()}, 50), + FakeNode = fakename@fakehost, + Cleaner ! 
{nodedown, FakeNode}, + receive + {got_nodedown_for_host_type, FakeNode, HostType} -> ok + after timer:seconds(1) -> + ct:fail({timeout, got_nodedown}) + end. + notify_self_hook(Acc, #{node := Node}, #{self := Self}) -> Self ! {got_nodedown, Node}, {ok, Acc}. +notify_self_hook_for_host_type(Acc, #{node := Node}, #{self := Self, host_type := HostType}) -> + Self ! {got_nodedown_for_host_type, Node, HostType}, + {ok, Acc}. + auth_anonymous(_Config) -> HostType = ?HOST, {U, S, R, JID, SID} = get_fake_session(), @@ -136,9 +214,9 @@ s2s(_Config) -> FromTo = {?HOST, <<"foreign">>}, ejabberd_s2s:try_register(FromTo), Self = self(), - [Self] = ejabberd_s2s:get_connections_pids(FromTo), + [Self] = ejabberd_s2s:get_s2s_out_pids(FromTo), mongoose_hooks:node_cleanup(node()), - [] = ejabberd_s2s:get_connections_pids(FromTo). + [] = ejabberd_s2s:get_s2s_out_pids(FromTo). bosh(_Config) -> {started, ok} = start(?HOST, mod_bosh), @@ -151,6 +229,47 @@ bosh(_Config) -> {error, _} = mod_bosh:get_session_socket(SID), ok. +component(_Config) -> + Handler = fun() -> ok end, + Domain = <<"cool.localhost">>, + Node = some_node, + {ok, _} = mongoose_component:register_components([Domain], Node, Handler, false), + true = mongoose_component:has_component(Domain), + #{mongoose_component := ok} = mongoose_hooks:node_cleanup(Node), + [] = mongoose_component:dirty_get_all_components(all), + false = mongoose_component:has_component(Domain), + ok. + +component_from_other_node_remains(_Config) -> + Handler = fun() -> ok end, + Domain = <<"cool.localhost">>, + {ok, Comps} = mongoose_component:register_components([Domain], other_node, Handler, false), + true = mongoose_component:has_component(Domain), + #{mongoose_component := ok} = mongoose_hooks:node_cleanup(some_node), + true = mongoose_component:has_component(Domain), + mongoose_component:unregister_components(Comps), + ok. 
+ +muc_room(_Config) -> + HostType = ?HOST, + MucHost = <<"muc.localhost">>, + Pid = remote_pid(), + Node = node(Pid), + Room = <<"remote_room">>, + ok = mongoose_muc_online_backend:register_room(HostType, MucHost, Room, Pid), + ok = mongoose_muc_online_backend:node_cleanup(HostType, Node), + {error, not_found} = mongoose_muc_online_backend:find_room_pid(HostType, MucHost, Room). + +muc_room_from_other_node_remains(_Config) -> + HostType = ?HOST, + MucHost = <<"muc.localhost">>, + Pid = self(), + RemoteNode = node(remote_pid()), + Room = <<"room_on_other_node">>, + ok = mongoose_muc_online_backend:register_room(HostType, MucHost, Room, Pid), + ok = mongoose_muc_online_backend:node_cleanup(HostType, RemoteNode), + {ok, Pid} = mongoose_muc_online_backend:find_room_pid(HostType, MucHost, Room). + %% ----------------------------------------------------- %% Internal %% ----------------------------------------------------- @@ -212,3 +331,37 @@ start(HostType, Module) -> start(HostType, Module, Opts) -> mongoose_modules:ensure_started(HostType, Module, Opts). + +disco_opts() -> + #{name => mongoose_cets_discovery, disco_file => "does_not_exist.txt"}. + +start_cets_disco(Config) -> + {ok, Pid} = cets_discovery:start(disco_opts()), + [{cets_disco, Pid} | Config]. + +stop_cets_disco(Config) -> + case proplists:get_value(cets_disco, Config) of + Pid when is_pid(Pid) -> + exit(Pid, kill); + _ -> + ok + end. + +%% Pid 90 on cool_node@localhost +%% Made using: +%% erl -name cool_node@localhost +%% rp(term_to_binary(list_to_pid("<0.90.0>"))). +remote_pid_binary() -> + <<131, 88, 100, 0, 19, 99, 111, 111, 108, 95, 110, 111, 100, 101, 64, + 108, 111, 99, 97, 108, 104, 111, 115, 116, 0, 0, 0, 90, 0, 0, 0, 0, 100, + 200, 255, 233>>. + +remote_pid() -> + binary_to_term(remote_pid_binary()). + +start_muc_backend(Config) -> + case proplists:get_value(muc_backend, Config) of + undefined -> ok; + Backend -> + mongoose_muc_online_backend:start(?HOST, #{online_backend => Backend}) + end. 
diff --git a/test/mongoose_config_SUITE.erl b/test/mongoose_config_SUITE.erl index 3b13c9dc7f8..f28edda48c0 100644 --- a/test/mongoose_config_SUITE.erl +++ b/test/mongoose_config_SUITE.erl @@ -26,6 +26,7 @@ groups() -> ]. init_per_suite(Config) -> + mnesia:start(), %% TODO Remove this call when possible (We still need it for s2s) {ok, _} = application:ensure_all_started(jid), Config. @@ -175,6 +176,7 @@ minimal_config_opts() -> {hide_service_name, false}, {host_types, []}, {hosts, [<<"localhost">>]}, + {internal_databases, #{}}, {language, <<"en">>}, {listen, []}, {loglevel, warning}, @@ -185,6 +187,8 @@ minimal_config_opts() -> {routing_modules, mongoose_router:default_routing_modules()}, {services, #{}}, {sm_backend, mnesia}, + {component_backend, mnesia}, + {s2s_backend, mnesia}, {{auth, <<"localhost">>}, config_parser_helper:default_auth()}, {{modules, <<"localhost">>}, #{}}, {{replaced_wait_timeout, <<"localhost">>}, 2000}, @@ -200,6 +204,7 @@ do_start_slave_node() -> {init_timeout, 10}, %% in seconds {startup_timeout, 10}], %% in seconds {ok, SlaveNode} = ct_slave:start(slave_name(), Opts), + rpc:call(SlaveNode, mnesia, start, []), %% TODO remove this call when possible {ok, CWD} = file:get_cwd(), ok = rpc:call(SlaveNode, file, set_cwd, [CWD]), %% Tell the remote node where to find the SUITE code diff --git a/test/mongoose_listener_SUITE.erl b/test/mongoose_listener_SUITE.erl index 00fa37bfb20..e9d92945a01 100644 --- a/test/mongoose_listener_SUITE.erl +++ b/test/mongoose_listener_SUITE.erl @@ -29,6 +29,7 @@ end_per_testcase(_Case, Config) -> Config. init_per_suite(C) -> + mnesia:start(), C. 
end_per_suite(_C) -> diff --git a/test/muc_light_SUITE.erl b/test/muc_light_SUITE.erl index 7bee59df83b..47a4825e6d3 100644 --- a/test/muc_light_SUITE.erl +++ b/test/muc_light_SUITE.erl @@ -82,6 +82,7 @@ opts() -> [{hosts, [host_type()]}, {host_types, []}, {all_metrics_are_global, false}, + {component_backend, mnesia}, {{modules, host_type()}, #{mod_muc_light => default_mod_config(mod_muc_light)}}]. %% ------------------------------------------------------------------ diff --git a/test/router_SUITE.erl b/test/router_SUITE.erl index adab7f31c6b..1b3ec4cf718 100644 --- a/test/router_SUITE.erl +++ b/test/router_SUITE.erl @@ -11,8 +11,7 @@ all() -> [ - {group, routing}, - {group, schema} + {group, routing} ]. groups() -> @@ -20,11 +19,7 @@ groups() -> {routing, [], [ basic_routing, do_not_reroute_errors - ]}, - {schema, [], [ - update_tables_hidden_components, - update_tables_hidden_components_idempotent - ]} + ]} ]. init_per_suite(C) -> @@ -46,23 +41,14 @@ init_per_group(routing, Config) -> mongoose_config:set_opt(routing_modules, xmpp_router:expand_routing_modules(RoutingModules)), gen_hook:start_link(), ejabberd_router:start_link(), - Config; -init_per_group(schema, Config) -> - remove_component_tables(), Config. end_per_group(routing, _Config) -> - mongoose_config:unset_opt(routing_modules); -end_per_group(schema, _Config) -> - ok. + mongoose_config:unset_opt(routing_modules). init_per_testcase(_CaseName, Config) -> Config. -end_per_testcase(HiddenComponent, _Config) - when HiddenComponent == update_tables_hidden_components; - HiddenComponent == update_tables_hidden_components_idempotent -> - remove_component_tables(); end_per_testcase(_CaseName, _Config) -> ok. @@ -109,26 +95,6 @@ do_not_reroute_errors(_) -> ejabberd_router:route(From, To, Acc, Stanza), ok. 
-update_tables_hidden_components(_C) -> - %% Tables as of b076e4a62a8b21188245f13c42f9cfd93e06e6b7 - create_component_tables([domain, handler, node]), - - ejabberd_router:update_tables(), - - %% Local table is removed and the distributed one has a new list of attributes - false = lists:member(external_component, mnesia:system_info(tables)), - [domain, handler, node, is_hidden] = mnesia:table_info(external_component_global, attributes). - -update_tables_hidden_components_idempotent(_C) -> - AttrsWithHidden = [domain, handler, node, is_hidden], - create_component_tables(AttrsWithHidden), - - ejabberd_router:update_tables(), - - %% Local table is not removed and the attribute list of the distributed one is not changed - true = lists:member(external_component, mnesia:system_info(tables)), - AttrsWithHidden = mnesia:table_info(external_component_global, attributes). - %% --------------------------------------------------------------- %% Helpers %% --------------------------------------------------------------- @@ -200,21 +166,6 @@ verify(L) -> ct:pal("all messages routed correctly") end. -create_component_tables(AttrList) -> - {atomic, ok} = - mnesia:create_table(external_component, - [{attributes, AttrList}, - {local_content, true}]), - {atomic, ok} = - mnesia:create_table(external_component_global, - [{attributes, AttrList}, - {type, bag}, - {record_name, external_component}]). - -remove_component_tables() -> - mnesia:delete_table(external_component), - mnesia:delete_table(external_component_global). 
- resend_as_error(From0, To0, Acc0, Packet0) -> {Acc1, Packet1} = jlib:make_error_reply(Acc0, Packet0, #xmlel{}), Acc2 = ejabberd_router:route(To0, From0, Acc1, Packet1), diff --git a/tools/build-releases.sh b/tools/build-releases.sh index 27765e874c3..2df2cec6e18 100755 --- a/tools/build-releases.sh +++ b/tools/build-releases.sh @@ -31,6 +31,7 @@ function try_copy_release --exclude rel/mongooseim/Mnesia.* \ --exclude rel/mongooseim/var \ --exclude rel/mongooseim/log \ + --exclude rel/mongooseim/etc/cets_disco.txt \ -al _build/$FIRST_NODE/ _build/$NODE/ ./tools/test_runner/apply_templates.sh "$NODE" "$(pwd)/_build/$NODE/" fi diff --git a/tools/gh-actions-configure-preset.sh b/tools/gh-actions-configure-preset.sh index 028b449cbd7..64f2e12cd73 100755 --- a/tools/gh-actions-configure-preset.sh +++ b/tools/gh-actions-configure-preset.sh @@ -36,6 +36,8 @@ case "$PRESET" in export REL_CONFIG="with-mysql with-redis with-amqp_client" ;; pgsql_mnesia) export REL_CONFIG="with-pgsql" ;; + cets_mnesia) + export REL_CONFIG="with-pgsql" ;; ldap_mnesia) export REL_CONFIG="with-none" ;; elasticsearch_and_cassandra_mnesia) diff --git a/tools/test_runner/apply_templates.erl b/tools/test_runner/apply_templates.erl index cef57a23d42..e18bafaf192 100644 --- a/tools/test_runner/apply_templates.erl +++ b/tools/test_runner/apply_templates.erl @@ -10,7 +10,7 @@ main([NodeAtom, BuildDirAtom]) -> log("BuildDirAtom=~p~n", [BuildDirAtom]), BuildDir = atom_to_list(BuildDirAtom), RelDir = BuildDir ++ "/rel/mongooseim", - Templates = templates(RelDir), + Templates = templates(RelDir, NodeAtom), log("Templates:~n~p~n", [Templates]), Vars0 = overlay_vars(NodeAtom), Vars = Vars0#{output_dir => list_to_binary(RelDir)}, @@ -39,8 +39,9 @@ ensure_binary_strings(Vars) -> end, Vars). %% Based on rebar.config overlay section -templates(RelDir) -> - simple_templates(RelDir) ++ erts_templates(RelDir). 
+templates(RelDir, NodeAtom) -> + simple_templates(RelDir) ++ erts_templates(RelDir) + ++ disco_template(RelDir, NodeAtom). simple_templates(RelDir) -> [{In, RelDir ++ "/" ++ Out} || {In, Out} <- simple_templates()]. @@ -60,6 +61,14 @@ erts_templates(RelDir) -> ErtsDirs = filelib:wildcard(RelDir ++ "/erts-*"), [{"rel/files/nodetool", ErtsDir ++ "/bin/nodetool"} || ErtsDir <- ErtsDirs]. +disco_template(RelDir, NodeAtom) -> + case lists:member(NodeAtom, [mim1, mim2, mim3]) of + true -> + [{"rel/files/cets_disco.txt", RelDir ++ "/etc/cets_disco.txt"}]; + false -> + [] + end. + render_template(In, Out, Vars) -> BinIn = bbmustache:parse_file(In), %% Do render twice to allow templates in variables