From 3ea9512c48c2132ac5eca58df5cdb17a8e31629f Mon Sep 17 00:00:00 2001
From: Mariyan Dimitrov
Date: Tue, 1 Oct 2024 10:38:23 +0300
Subject: [PATCH] feat(db): Add db (#6)

* feat(db): Add draft db work
* feat(db): Add more draft db work
* Update tests/unit/test_irc.py

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

* fix(typo): Remove paren
* chore(lib): Add data interfaces
* chore: Address feedback
* Update templates/config.yaml

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

* chore: Address feedback
* refactor(pydantic): Use pydantic models
* chore(lint): Fix linting issues in src/
* tests(irc): Start from scratch
* feat(irc): Pydantic baby
* feat(observer): Standardize connection name
* tests(db): Add db observer tests
* chore(deps): Add mocker
* tests(irc): Extend irc tests
* tests(irc): More irc tests
* refactor(logger): Don't log file content creation
* chore(naming: Rename to main
* chore(license): Remove empty line
* chore(lint): Fix linting issues
* chore(static): Fix bandit issues
* docs(): Update src-docs
* chore(license): Add license header
* chore(license): Ignore templated files
* chore(fmt): Black is the new black
* chore(hooks): Add upgrade hook
* chore(status): Add Maintanence on stop
* chore(config): Remove property decorator
* chore(relation): Pass a list of relation IDs
* chore(endpoint): Split once
* chore(config): Handle KeyError in model
* chore(docstring): Remove obsolete part
* refactor(irc): Move out conf to constants
* refactor(irc): Use data hash directly
* refactor(irc): Rename file var
* refactor(irc): Use https
* refactor(db): Use from_relation
* chore(lint): Fix linting issues
* tests(irc): Basic integration
* refactor(db): Use uri in model
* tests(irc): More irc tests
* chore(db): Use MaintenanceStatus
* chore(db): Add endpoints_changed
* chore(systemd): Idempotent service enable
* chore(config): Handle KeyError
* ci(): Make lint happi
* tests(): Assert message
* tests(): reAssert message
* chore(): No reason no cry
* chore(): Raise custom error
* chore(): Log config exception
* chore(): Better error message
* chore(logging): Lazy swazy

---------

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
---
 .licenserc.yaml | 1 +
 charmcraft.yaml | 3 +
 config.yaml | 20 +
 .../data_platform_libs/v0/data_interfaces.py | 3495 +++++++++++++++++
 lib/charms/operator_libs_linux/v1/systemd.py | 288 ++
 metadata.yaml | 4 +
 requirements.txt | 2 +-
 src-docs/charm.py.md | 92 +
 src-docs/charm_types.py.md | 179 +
 src-docs/constants.py.md | 19 +
 src-docs/database_observer.py.md | 69 +
 src-docs/exceptions.py.md | 90 +
 src-docs/irc.py.md | 186 +
 src-docs/matrix_observer.py.md | 59 +
 src/charm.py | 85 +-
 src/charm_types.py | 107 +
 src/constants.py | 30 +
 src/database_observer.py | 66 +
 src/exceptions.py | 48 +
 src/irc.py | 253 +-
 src/matrix_observer.py | 42 +
 templates/config.yaml | 139 +
 templates/matrix-appservice-irc.service | 18 +
 templates/matrix-appservice-irc.target | 6 +
 tests/integration/helpers.py | 17 +-
 tests/integration/test_charm.py | 30 +-
 tests/unit/test_charm_types.py | 34 +
 tests/unit/test_database_observer.py | 102 +
 tests/unit/test_irc.py | 451 +++
 tox.ini | 4 +-
 30 files changed, 5802 insertions(+), 137 deletions(-)
 create mode 100644 config.yaml
 create mode 100644 lib/charms/data_platform_libs/v0/data_interfaces.py
 create mode 100644 lib/charms/operator_libs_linux/v1/systemd.py
 create mode 100644 src-docs/charm.py.md
 create mode 100644 src-docs/charm_types.py.md
 create mode 100644 src-docs/constants.py.md
 create mode 100644 src-docs/database_observer.py.md
 create mode 100644 src-docs/exceptions.py.md
 create mode 100644 src-docs/irc.py.md
 create mode 100644 src-docs/matrix_observer.py.md
 create mode 100644 src/charm_types.py
 create mode 100644 src/database_observer.py
 create mode 100644 src/matrix_observer.py
 create mode 100644 templates/config.yaml
 create mode 100644 templates/matrix-appservice-irc.service
 create mode 100644 templates/matrix-appservice-irc.target
 create mode 100644 tests/unit/test_charm_types.py
 create mode 100644 tests/unit/test_database_observer.py
 create mode 100644 tests/unit/test_irc.py
diff --git a/.licenserc.yaml b/.licenserc.yaml
index d2de439..a66af28 100644
--- a/.licenserc.yaml
+++ b/.licenserc.yaml
@@ -28,6 +28,7 @@ header:
     - 'lib/**'
     - 'LICENSE'
     - '.licenserc.yaml'
+    - 'templates/**'
     - '.trivyignore'
     - 'trivy.yaml'
     - '.wokeignore'
diff --git a/charmcraft.yaml b/charmcraft.yaml
index d8ffab2..4a3f430 100644
--- a/charmcraft.yaml
+++ b/charmcraft.yaml
@@ -8,3 +8,6 @@ bases:
     run-on:
     - name: ubuntu
       channel: "22.04"
+parts:
+  charm:
+    build-packages: [cargo, rustc, pkg-config, libffi-dev, libssl-dev]
diff --git a/config.yaml b/config.yaml
new file mode 100644
index 0000000..74a74d8
--- /dev/null
+++ b/config.yaml
@@ -0,0 +1,20 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+options:
+  ident_enabled:
+    type: boolean
+    default: false
+    description: |
+      Configures whether to enable IRC ident server.
+      Reference: |
+      https://github.com/matrix-org/matrix-appservice-irc/blob/develop/config.sample.yaml#L437
+  bridge_admins:
+    type: string
+    description: |
+      Comma separated list of admins to be allowed to manage the bridge.
+      This takes the form of user1:domainX.com,user2:domainY.com...
+  bot_nickname:
+    type: string
+    description: |
+      Name of the bot that will connect to the IRC network from Matrix.
diff --git a/lib/charms/data_platform_libs/v0/data_interfaces.py b/lib/charms/data_platform_libs/v0/data_interfaces.py
new file mode 100644
index 0000000..a2162aa
--- /dev/null
+++ b/lib/charms/data_platform_libs/v0/data_interfaces.py
@@ -0,0 +1,3495 @@
+# Copyright 2023 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+r"""Library to manage the relation for the data-platform products.
+
+This library contains the Requires and Provides classes for handling the relation
+between an application and multiple managed application supported by the data-team:
+MySQL, Postgresql, MongoDB, Redis, and Kafka.
+
+### Database (MySQL, Postgresql, MongoDB, and Redis)
+
+#### Requires Charm
+This library is a uniform interface to a selection of common database
+metadata, with added custom events that add convenience to database management,
+and methods to consume the application related data.
+ + +Following an example of using the DatabaseCreatedEvent, in the context of the +application charm code: + +```python + +from charms.data_platform_libs.v0.data_interfaces import ( + DatabaseCreatedEvent, + DatabaseRequires, +) + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Charm events defined in the database requires charm library. + self.database = DatabaseRequires(self, relation_name="database", database_name="database") + self.framework.observe(self.database.on.database_created, self._on_database_created) + + def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + + # Start application with rendered configuration + self._start_application(config_file) + + # Set active status + self.unit.status = ActiveStatus("received database credentials") +``` + +As shown above, the library provides some custom events to handle specific situations, +which are listed below: + +- database_created: event emitted when the requested database is created. +- endpoints_changed: event emitted when the read/write endpoints of the database have changed. +- read_only_endpoints_changed: event emitted when the read-only endpoints of the database + have changed. Event is not triggered if read/write endpoints changed too. + +If it is needed to connect multiple database clusters to the same relation endpoint +the application charm can implement the same code as if it would connect to only +one database cluster (like the above code example). + +To differentiate multiple clusters connected to the same relation endpoint +the application charm can use the name of the remote application: + +```python + +def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Get the remote app name of the cluster that triggered this event + cluster = event.relation.app.name +``` + +It is also possible to provide an alias for each different database cluster/relation. + +So, it is possible to differentiate the clusters in two ways. +The first is to use the remote application name, i.e., `event.relation.app.name`, as above. + +The second way is to use different event handlers to handle each cluster events. +The implementation would be something like the following code: + +```python + +from charms.data_platform_libs.v0.data_interfaces import ( + DatabaseCreatedEvent, + DatabaseRequires, +) + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Define the cluster aliases and one handler for each cluster database created event. + self.database = DatabaseRequires( + self, + relation_name="database", + database_name="database", + relations_aliases = ["cluster1", "cluster2"], + ) + self.framework.observe( + self.database.on.cluster1_database_created, self._on_cluster1_database_created + ) + self.framework.observe( + self.database.on.cluster2_database_created, self._on_cluster2_database_created + ) + + def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster1 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... 
+ + def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster2 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... + +``` + +When it's needed to check whether a plugin (extension) is enabled on the PostgreSQL +charm, you can use the is_postgresql_plugin_enabled method. To use that, you need to +add the following dependency to your charmcraft.yaml file: + +```yaml + +parts: + charm: + charm-binary-python-packages: + - psycopg[binary] + +``` + +### Provider Charm + +Following an example of using the DatabaseRequestedEvent, in the context of the +database charm code: + +```python +from charms.data_platform_libs.v0.data_interfaces import DatabaseProvides + +class SampleCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + # Charm events defined in the database provides charm library. + self.provided_database = DatabaseProvides(self, relation_name="database") + self.framework.observe(self.provided_database.on.database_requested, + self._on_database_requested) + # Database generic helper + self.database = DatabaseHelper() + + def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: + # Handle the event triggered by a new database requested in the relation + # Retrieve the database name using the charm library. + db_name = event.database + # generate a new user credential + username = self.database.generate_user() + password = self.database.generate_password() + # set the credentials for the relation + self.provided_database.set_credentials(event.relation.id, username, password) + # set other variables for the relation event.set_tls("False") +``` +As shown above, the library provides a custom event (database_requested) to handle +the situation when an application charm requests a new database to be created. +It's preferred to subscribe to this event instead of relation changed event to avoid +creating a new database when other information other than a database name is +exchanged in the relation databag. + +### Kafka + +This library is the interface to use and interact with the Kafka charm. This library contains +custom events that add convenience to manage Kafka, and provides methods to consume the +application related data. + +#### Requirer Charm + +```python + +from charms.data_platform_libs.v0.data_interfaces import ( + BootstrapServerChangedEvent, + KafkaRequires, + TopicCreatedEvent, +) + +class ApplicationCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + self.kafka = KafkaRequires(self, "kafka_client", "test-topic") + self.framework.observe( + self.kafka.on.bootstrap_server_changed, self._on_kafka_bootstrap_server_changed + ) + self.framework.observe( + self.kafka.on.topic_created, self._on_kafka_topic_created + ) + + def _on_kafka_bootstrap_server_changed(self, event: BootstrapServerChangedEvent): + # Event triggered when a bootstrap server was changed for this application + + new_bootstrap_server = event.bootstrap_server + ... + + def _on_kafka_topic_created(self, event: TopicCreatedEvent): + # Event triggered when a topic was created for this application + username = event.username + password = event.password + tls = event.tls + tls_ca= event.tls_ca + bootstrap_server event.bootstrap_server + consumer_group_prefic = event.consumer_group_prefix + zookeeper_uris = event.zookeeper_uris + ... 
+ +``` + +As shown above, the library provides some custom events to handle specific situations, +which are listed below: + +- topic_created: event emitted when the requested topic is created. +- bootstrap_server_changed: event emitted when the bootstrap server have changed. +- credential_changed: event emitted when the credentials of Kafka changed. + +### Provider Charm + +Following the previous example, this is an example of the provider charm. + +```python +class SampleCharm(CharmBase): + +from charms.data_platform_libs.v0.data_interfaces import ( + KafkaProvides, + TopicRequestedEvent, +) + + def __init__(self, *args): + super().__init__(*args) + + # Default charm events. + self.framework.observe(self.on.start, self._on_start) + + # Charm events defined in the Kafka Provides charm library. + self.kafka_provider = KafkaProvides(self, relation_name="kafka_client") + self.framework.observe(self.kafka_provider.on.topic_requested, self._on_topic_requested) + # Kafka generic helper + self.kafka = KafkaHelper() + + def _on_topic_requested(self, event: TopicRequestedEvent): + # Handle the on_topic_requested event. + + topic = event.topic + relation_id = event.relation.id + # set connection info in the databag relation + self.kafka_provider.set_bootstrap_server(relation_id, self.kafka.get_bootstrap_server()) + self.kafka_provider.set_credentials(relation_id, username=username, password=password) + self.kafka_provider.set_consumer_group_prefix(relation_id, ...) + self.kafka_provider.set_tls(relation_id, "False") + self.kafka_provider.set_zookeeper_uris(relation_id, ...) + +``` +As shown above, the library provides a custom event (topic_requested) to handle +the situation when an application charm requests a new topic to be created. +It is preferred to subscribe to this event instead of relation changed event to avoid +creating a new topic when other information other than a topic name is +exchanged in the relation databag. +""" + +import copy +import json +import logging +from abc import ABC, abstractmethod +from collections import UserDict, namedtuple +from datetime import datetime +from enum import Enum +from typing import ( + Callable, + Dict, + ItemsView, + KeysView, + List, + Optional, + Set, + Tuple, + Union, + ValuesView, +) + +from ops import JujuVersion, Model, Secret, SecretInfo, SecretNotFoundError +from ops.charm import ( + CharmBase, + CharmEvents, + RelationChangedEvent, + RelationCreatedEvent, + RelationEvent, + SecretChangedEvent, +) +from ops.framework import EventSource, Object +from ops.model import Application, ModelError, Relation, Unit + +# The unique Charmhub library identifier, never change it +LIBID = "6c3e6b6680d64e9c89e611d1a15f65be" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 38 + +PYDEPS = ["ops>=2.0.0"] + +logger = logging.getLogger(__name__) + +Diff = namedtuple("Diff", "added changed deleted") +Diff.__doc__ = """ +A tuple for storing the diff between two data mappings. 
+ +added - keys that were added +changed - keys that still exist but have new values +deleted - key that were deleted""" + + +PROV_SECRET_PREFIX = "secret-" +REQ_SECRET_FIELDS = "requested-secrets" +GROUP_MAPPING_FIELD = "secret_group_mapping" +GROUP_SEPARATOR = "@" + + +class SecretGroup(str): + """Secret groups specific type.""" + + +class SecretGroupsAggregate(str): + """Secret groups with option to extend with additional constants.""" + + def __init__(self): + self.USER = SecretGroup("user") + self.TLS = SecretGroup("tls") + self.EXTRA = SecretGroup("extra") + + def __setattr__(self, name, value): + """Setting internal constants.""" + if name in self.__dict__: + raise RuntimeError("Can't set constant!") + else: + super().__setattr__(name, SecretGroup(value)) + + def groups(self) -> list: + """Return the list of stored SecretGroups.""" + return list(self.__dict__.values()) + + def get_group(self, group: str) -> Optional[SecretGroup]: + """If the input str translates to a group name, return that.""" + return SecretGroup(group) if group in self.groups() else None + + +SECRET_GROUPS = SecretGroupsAggregate() + + +class DataInterfacesError(Exception): + """Common ancestor for DataInterfaces related exceptions.""" + + +class SecretError(DataInterfacesError): + """Common ancestor for Secrets related exceptions.""" + + +class SecretAlreadyExistsError(SecretError): + """A secret that was to be added already exists.""" + + +class SecretsUnavailableError(SecretError): + """Secrets aren't yet available for Juju version used.""" + + +class SecretsIllegalUpdateError(SecretError): + """Secrets aren't yet available for Juju version used.""" + + +class IllegalOperationError(DataInterfacesError): + """To be used when an operation is not allowed to be performed.""" + + +def get_encoded_dict( + relation: Relation, member: Union[Unit, Application], field: str +) -> Optional[Dict[str, str]]: + """Retrieve and decode an encoded field from relation data.""" + data = json.loads(relation.data[member].get(field, "{}")) + if isinstance(data, dict): + return data + logger.error("Unexpected datatype for %s instead of dict.", str(data)) + + +def get_encoded_list( + relation: Relation, member: Union[Unit, Application], field: str +) -> Optional[List[str]]: + """Retrieve and decode an encoded field from relation data.""" + data = json.loads(relation.data[member].get(field, "[]")) + if isinstance(data, list): + return data + logger.error("Unexpected datatype for %s instead of list.", str(data)) + + +def set_encoded_field( + relation: Relation, + member: Union[Unit, Application], + field: str, + value: Union[str, list, Dict[str, str]], +) -> None: + """Set an encoded field from relation data.""" + relation.data[member].update({field: json.dumps(value)}) + + +def diff(event: RelationChangedEvent, bucket: Optional[Union[Unit, Application]]) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + bucket: bucket of the databag (app or unit) + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + # Retrieve the old data from the data key in the application relation databag. + if not bucket: + return Diff([], [], []) + + old_data = get_encoded_dict(event.relation, bucket, "data") + + if not old_data: + old_data = {} + + # Retrieve the new data from the event relation databag. 
+ new_data = ( + {key: value for key, value in event.relation.data[event.app].items() if key != "data"} + if event.app + else {} + ) + + # These are the keys that were added to the databag and triggered this event. + added = new_data.keys() - old_data.keys() # pyright: ignore [reportAssignmentType] + # These are the keys that were removed from the databag and triggered this event. + deleted = old_data.keys() - new_data.keys() # pyright: ignore [reportAssignmentType] + # These are the keys that already existed in the databag, + # but had their values changed. + changed = { + key + for key in old_data.keys() & new_data.keys() # pyright: ignore [reportAssignmentType] + if old_data[key] != new_data[key] # pyright: ignore [reportAssignmentType] + } + # Convert the new_data to a serializable format and save it for a next diff check. + set_encoded_field(event.relation, bucket, "data", new_data) + + # Return the diff with all possible changes. + return Diff(added, changed, deleted) + + +def leader_only(f): + """Decorator to ensure that only leader can perform given operation.""" + + def wrapper(self, *args, **kwargs): + if self.component == self.local_app and not self.local_unit.is_leader(): + logger.error( + "This operation (%s()) can only be performed by the leader unit", f.__name__ + ) + return + return f(self, *args, **kwargs) + + wrapper.leader_only = True + return wrapper + + +def juju_secrets_only(f): + """Decorator to ensure that certain operations would be only executed on Juju3.""" + + def wrapper(self, *args, **kwargs): + if not self.secrets_enabled: + raise SecretsUnavailableError("Secrets unavailable on current Juju version") + return f(self, *args, **kwargs) + + return wrapper + + +def dynamic_secrets_only(f): + """Decorator to ensure that certain operations would be only executed when NO static secrets are defined.""" + + def wrapper(self, *args, **kwargs): + if self.static_secret_fields: + raise IllegalOperationError( + "Unsafe usage of statically and dynamically defined secrets, aborting." + ) + return f(self, *args, **kwargs) + + return wrapper + + +def either_static_or_dynamic_secrets(f): + """Decorator to ensure that static and dynamic secrets won't be used in parallel.""" + + def wrapper(self, *args, **kwargs): + if self.static_secret_fields and set(self.current_secret_fields) - set( + self.static_secret_fields + ): + raise IllegalOperationError( + "Unsafe usage of statically and dynamically defined secrets, aborting." + ) + return f(self, *args, **kwargs) + + return wrapper + + +class Scope(Enum): + """Peer relations scope.""" + + APP = "app" + UNIT = "unit" + + +################################################################################ +# Secrets internal caching +################################################################################ + + +class CachedSecret: + """Locally cache a secret. 
+ + The data structure is precisely re-using/simulating as in the actual Secret Storage + """ + + def __init__( + self, + model: Model, + component: Union[Application, Unit], + label: str, + secret_uri: Optional[str] = None, + legacy_labels: List[str] = [], + ): + self._secret_meta = None + self._secret_content = {} + self._secret_uri = secret_uri + self.label = label + self._model = model + self.component = component + self.legacy_labels = legacy_labels + self.current_label = None + + def add_secret( + self, + content: Dict[str, str], + relation: Optional[Relation] = None, + label: Optional[str] = None, + ) -> Secret: + """Create a new secret.""" + if self._secret_uri: + raise SecretAlreadyExistsError( + "Secret is already defined with uri %s", self._secret_uri + ) + + label = self.label if not label else label + + secret = self.component.add_secret(content, label=label) + if relation and relation.app != self._model.app: + # If it's not a peer relation, grant is to be applied + secret.grant(relation) + self._secret_uri = secret.id + self._secret_meta = secret + return self._secret_meta + + @property + def meta(self) -> Optional[Secret]: + """Getting cached secret meta-information.""" + if not self._secret_meta: + if not (self._secret_uri or self.label): + return + + for label in [self.label] + self.legacy_labels: + try: + self._secret_meta = self._model.get_secret(label=label) + except SecretNotFoundError: + pass + else: + if label != self.label: + self.current_label = label + break + + # If still not found, to be checked by URI, to be labelled with the proposed label + if not self._secret_meta and self._secret_uri: + self._secret_meta = self._model.get_secret(id=self._secret_uri, label=self.label) + return self._secret_meta + + def get_content(self) -> Dict[str, str]: + """Getting cached secret content.""" + if not self._secret_content: + if self.meta: + try: + self._secret_content = self.meta.get_content(refresh=True) + except (ValueError, ModelError) as err: + # https://bugs.launchpad.net/juju/+bug/2042596 + # Only triggered when 'refresh' is set + known_model_errors = [ + "ERROR either URI or label should be used for getting an owned secret but not both", + "ERROR secret owner cannot use --refresh", + ] + if isinstance(err, ModelError) and not any( + msg in str(err) for msg in known_model_errors + ): + raise + # Due to: ValueError: Secret owner cannot use refresh=True + self._secret_content = self.meta.get_content() + return self._secret_content + + def _move_to_new_label_if_needed(self): + """Helper function to re-create the secret with a different label.""" + if not self.current_label or not (self.meta and self._secret_meta): + return + + # Create a new secret with the new label + content = self._secret_meta.get_content() + self._secret_uri = None + + # I wish we could just check if we are the owners of the secret... 
+ try: + self._secret_meta = self.add_secret(content, label=self.label) + except ModelError as err: + if "this unit is not the leader" not in str(err): + raise + self.current_label = None + + def set_content(self, content: Dict[str, str]) -> None: + """Setting cached secret content.""" + if not self.meta: + return + + # DPE-4182: do not create new revision if the content stay the same + if content == self.get_content(): + return + + if content: + self._move_to_new_label_if_needed() + self.meta.set_content(content) + self._secret_content = content + else: + self.meta.remove_all_revisions() + + def get_info(self) -> Optional[SecretInfo]: + """Wrapper function to apply the corresponding call on the Secret object within CachedSecret if any.""" + if self.meta: + return self.meta.get_info() + + def remove(self) -> None: + """Remove secret.""" + if not self.meta: + raise SecretsUnavailableError("Non-existent secret was attempted to be removed.") + try: + self.meta.remove_all_revisions() + except SecretNotFoundError: + pass + self._secret_content = {} + self._secret_meta = None + self._secret_uri = None + + +class SecretCache: + """A data structure storing CachedSecret objects.""" + + def __init__(self, model: Model, component: Union[Application, Unit]): + self._model = model + self.component = component + self._secrets: Dict[str, CachedSecret] = {} + + def get( + self, label: str, uri: Optional[str] = None, legacy_labels: List[str] = [] + ) -> Optional[CachedSecret]: + """Getting a secret from Juju Secret store or cache.""" + if not self._secrets.get(label): + secret = CachedSecret( + self._model, self.component, label, uri, legacy_labels=legacy_labels + ) + if secret.meta: + self._secrets[label] = secret + return self._secrets.get(label) + + def add(self, label: str, content: Dict[str, str], relation: Relation) -> CachedSecret: + """Adding a secret to Juju Secret.""" + if self._secrets.get(label): + raise SecretAlreadyExistsError(f"Secret {label} already exists") + + secret = CachedSecret(self._model, self.component, label) + secret.add_secret(content, relation) + self._secrets[label] = secret + return self._secrets[label] + + def remove(self, label: str) -> None: + """Remove a secret from the cache.""" + if secret := self.get(label): + try: + secret.remove() + self._secrets.pop(label) + except (SecretsUnavailableError, KeyError): + pass + else: + return + logging.debug("Non-existing Juju Secret was attempted to be removed %s", label) + + +################################################################################ +# Relation Data base/abstract ancestors (i.e. 
parent classes) +################################################################################ + + +# Base Data + + +class DataDict(UserDict): + """Python Standard Library 'dict' - like representation of Relation Data.""" + + def __init__(self, relation_data: "Data", relation_id: int): + self.relation_data = relation_data + self.relation_id = relation_id + + @property + def data(self) -> Dict[str, str]: + """Return the full content of the Abstract Relation Data dictionary.""" + result = self.relation_data.fetch_my_relation_data([self.relation_id]) + try: + result_remote = self.relation_data.fetch_relation_data([self.relation_id]) + except NotImplementedError: + result_remote = {self.relation_id: {}} + if result: + result_remote[self.relation_id].update(result[self.relation_id]) + return result_remote.get(self.relation_id, {}) + + def __setitem__(self, key: str, item: str) -> None: + """Set an item of the Abstract Relation Data dictionary.""" + self.relation_data.update_relation_data(self.relation_id, {key: item}) + + def __getitem__(self, key: str) -> str: + """Get an item of the Abstract Relation Data dictionary.""" + result = None + + # Avoiding "leader_only" error when cross-charm non-leader unit, not to report useless error + if ( + not hasattr(self.relation_data.fetch_my_relation_field, "leader_only") + or self.relation_data.component != self.relation_data.local_app + or self.relation_data.local_unit.is_leader() + ): + result = self.relation_data.fetch_my_relation_field(self.relation_id, key) + + if not result: + try: + result = self.relation_data.fetch_relation_field(self.relation_id, key) + except NotImplementedError: + pass + + if not result: + raise KeyError + return result + + def __eq__(self, d: dict) -> bool: + """Equality.""" + return self.data == d + + def __repr__(self) -> str: + """String representation Abstract Relation Data dictionary.""" + return repr(self.data) + + def __len__(self) -> int: + """Length of the Abstract Relation Data dictionary.""" + return len(self.data) + + def __delitem__(self, key: str) -> None: + """Delete an item of the Abstract Relation Data dictionary.""" + self.relation_data.delete_relation_data(self.relation_id, [key]) + + def has_key(self, key: str) -> bool: + """Does the key exist in the Abstract Relation Data dictionary?""" + return key in self.data + + def update(self, items: Dict[str, str]): + """Update the Abstract Relation Data dictionary.""" + self.relation_data.update_relation_data(self.relation_id, items) + + def keys(self) -> KeysView[str]: + """Keys of the Abstract Relation Data dictionary.""" + return self.data.keys() + + def values(self) -> ValuesView[str]: + """Values of the Abstract Relation Data dictionary.""" + return self.data.values() + + def items(self) -> ItemsView[str, str]: + """Items of the Abstract Relation Data dictionary.""" + return self.data.items() + + def pop(self, item: str) -> str: + """Pop an item of the Abstract Relation Data dictionary.""" + result = self.relation_data.fetch_my_relation_field(self.relation_id, item) + if not result: + raise KeyError(f"Item {item} doesn't exist.") + self.relation_data.delete_relation_data(self.relation_id, [item]) + return result + + def __contains__(self, item: str) -> bool: + """Does the Abstract Relation Data dictionary contain item?""" + return item in self.data.values() + + def __iter__(self): + """Iterate through the Abstract Relation Data dictionary.""" + return iter(self.data) + + def get(self, key: str, default: Optional[str] = None) -> Optional[str]: + """Safely 
get an item of the Abstract Relation Data dictionary.""" + try: + if result := self[key]: + return result + except KeyError: + return default + + +class Data(ABC): + """Base relation data mainpulation (abstract) class.""" + + SCOPE = Scope.APP + + # Local map to associate mappings with secrets potentially as a group + SECRET_LABEL_MAP = { + "username": SECRET_GROUPS.USER, + "password": SECRET_GROUPS.USER, + "uris": SECRET_GROUPS.USER, + "tls": SECRET_GROUPS.TLS, + "tls-ca": SECRET_GROUPS.TLS, + } + + def __init__( + self, + model: Model, + relation_name: str, + ) -> None: + self._model = model + self.local_app = self._model.app + self.local_unit = self._model.unit + self.relation_name = relation_name + self._jujuversion = None + self.component = self.local_app if self.SCOPE == Scope.APP else self.local_unit + self.secrets = SecretCache(self._model, self.component) + self.data_component = None + + @property + def relations(self) -> List[Relation]: + """The list of Relation instances associated with this relation_name.""" + return [ + relation + for relation in self._model.relations[self.relation_name] + if self._is_relation_active(relation) + ] + + @property + def secrets_enabled(self): + """Is this Juju version allowing for Secrets usage?""" + if not self._jujuversion: + self._jujuversion = JujuVersion.from_environ() + return self._jujuversion.has_secrets + + @property + def secret_label_map(self): + """Exposing secret-label map via a property -- could be overridden in descendants!""" + return self.SECRET_LABEL_MAP + + # Mandatory overrides for internal/helper methods + + @abstractmethod + def _get_relation_secret( + self, relation_id: int, group_mapping: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[CachedSecret]: + """Retrieve a Juju Secret that's been stored in the relation databag.""" + raise NotImplementedError + + @abstractmethod + def _fetch_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetch data available (directily or indirectly -- i.e. secrets) from the relation.""" + raise NotImplementedError + + @abstractmethod + def _fetch_my_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetch data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app.""" + raise NotImplementedError + + @abstractmethod + def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None: + """Update data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app.""" + raise NotImplementedError + + @abstractmethod + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Delete data available (directily or indirectly -- i.e. 
secrets) from the relation for owner/this_app.""" + raise NotImplementedError + + # Internal helper methods + + @staticmethod + def _is_relation_active(relation: Relation): + """Whether the relation is active based on contained data.""" + try: + _ = repr(relation.data) + return True + except (RuntimeError, ModelError): + return False + + @staticmethod + def _is_secret_field(field: str) -> bool: + """Is the field in question a secret reference (URI) field or not?""" + return field.startswith(PROV_SECRET_PREFIX) + + @staticmethod + def _generate_secret_label( + relation_name: str, relation_id: int, group_mapping: SecretGroup + ) -> str: + """Generate unique group_mappings for secrets within a relation context.""" + return f"{relation_name}.{relation_id}.{group_mapping}.secret" + + def _generate_secret_field_name(self, group_mapping: SecretGroup) -> str: + """Generate unique group_mappings for secrets within a relation context.""" + return f"{PROV_SECRET_PREFIX}{group_mapping}" + + def _relation_from_secret_label(self, secret_label: str) -> Optional[Relation]: + """Retrieve the relation that belongs to a secret label.""" + contents = secret_label.split(".") + + if not (contents and len(contents) >= 3): + return + + contents.pop() # ".secret" at the end + contents.pop() # Group mapping + relation_id = contents.pop() + try: + relation_id = int(relation_id) + except ValueError: + return + + # In case '.' character appeared in relation name + relation_name = ".".join(contents) + + try: + return self.get_relation(relation_name, relation_id) + except ModelError: + return + + def _group_secret_fields(self, secret_fields: List[str]) -> Dict[SecretGroup, List[str]]: + """Helper function to arrange secret mappings under their group. + + NOTE: All unrecognized items end up in the 'extra' secret bucket. + Make sure only secret fields are passed! 
+ """ + secret_fieldnames_grouped = {} + for key in secret_fields: + if group := self.secret_label_map.get(key): + secret_fieldnames_grouped.setdefault(group, []).append(key) + else: + secret_fieldnames_grouped.setdefault(SECRET_GROUPS.EXTRA, []).append(key) + return secret_fieldnames_grouped + + def _get_group_secret_contents( + self, + relation: Relation, + group: SecretGroup, + secret_fields: Union[Set[str], List[str]] = [], + ) -> Dict[str, str]: + """Helper function to retrieve collective, requested contents of a secret.""" + if (secret := self._get_relation_secret(relation.id, group)) and ( + secret_data := secret.get_content() + ): + return { + k: v for k, v in secret_data.items() if not secret_fields or k in secret_fields + } + return {} + + def _content_for_secret_group( + self, content: Dict[str, str], secret_fields: Set[str], group_mapping: SecretGroup + ) -> Dict[str, str]: + """Select : pairs from input, that belong to this particular Secret group.""" + if group_mapping == SECRET_GROUPS.EXTRA: + return { + k: v + for k, v in content.items() + if k in secret_fields and k not in self.secret_label_map.keys() + } + + return { + k: v + for k, v in content.items() + if k in secret_fields and self.secret_label_map.get(k) == group_mapping + } + + @juju_secrets_only + def _get_relation_secret_data( + self, relation_id: int, group_mapping: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[Dict[str, str]]: + """Retrieve contents of a Juju Secret that's been stored in the relation databag.""" + secret = self._get_relation_secret(relation_id, group_mapping, relation_name) + if secret: + return secret.get_content() + + # Core operations on Relation Fields manipulations (regardless whether the field is in the databag or in a secret) + # Internal functions to be called directly from transparent public interface functions (+closely related helpers) + + def _process_secret_fields( + self, + relation: Relation, + req_secret_fields: Optional[List[str]], + impacted_rel_fields: List[str], + operation: Callable, + *args, + **kwargs, + ) -> Tuple[Dict[str, str], Set[str]]: + """Isolate target secret fields of manipulation, and execute requested operation by Secret Group.""" + result = {} + + # If the relation started on a databag, we just stay on the databag + # (Rolling upgrades may result in a relation starting on databag, getting secrets enabled on-the-fly) + # self.local_app is sufficient to check (ignored if Requires, never has secrets -- works if Provider) + fallback_to_databag = ( + req_secret_fields + and (self.local_unit == self._model.unit and self.local_unit.is_leader()) + and set(req_secret_fields) & set(relation.data[self.component]) + ) + + normal_fields = set(impacted_rel_fields) + if req_secret_fields and self.secrets_enabled and not fallback_to_databag: + normal_fields = normal_fields - set(req_secret_fields) + secret_fields = set(impacted_rel_fields) - set(normal_fields) + + secret_fieldnames_grouped = self._group_secret_fields(list(secret_fields)) + + for group in secret_fieldnames_grouped: + # operation() should return nothing when all goes well + if group_result := operation(relation, group, secret_fields, *args, **kwargs): + # If "meaningful" data was returned, we take it. (Some 'operation'-s only return success/failure.) 
+ if isinstance(group_result, dict): + result.update(group_result) + else: + # If it wasn't found as a secret, let's give it a 2nd chance as "normal" field + # Needed when Juju3 Requires meets Juju2 Provider + normal_fields |= set(secret_fieldnames_grouped[group]) + return (result, normal_fields) + + def _fetch_relation_data_without_secrets( + self, component: Union[Application, Unit], relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetching databag contents when no secrets are involved. + + Since the Provider's databag is the only one holding secrest, we can apply + a simplified workflow to read the Require's side's databag. + This is used typically when the Provider side wants to read the Requires side's data, + or when the Requires side may want to read its own data. + """ + if component not in relation.data or not relation.data[component]: + return {} + + if fields: + return { + k: relation.data[component][k] for k in fields if k in relation.data[component] + } + else: + return dict(relation.data[component]) + + def _fetch_relation_data_with_secrets( + self, + component: Union[Application, Unit], + req_secret_fields: Optional[List[str]], + relation: Relation, + fields: Optional[List[str]] = None, + ) -> Dict[str, str]: + """Fetching databag contents when secrets may be involved. + + This function has internal logic to resolve if a requested field may be "hidden" + within a Relation Secret, or directly available as a databag field. Typically + used to read the Provider side's databag (eigher by the Requires side, or by + Provider side itself). + """ + result = {} + normal_fields = [] + + if not fields: + if component not in relation.data: + return {} + + all_fields = list(relation.data[component].keys()) + normal_fields = [field for field in all_fields if not self._is_secret_field(field)] + fields = normal_fields + req_secret_fields if req_secret_fields else normal_fields + + if fields: + result, normal_fields = self._process_secret_fields( + relation, req_secret_fields, fields, self._get_group_secret_contents + ) + + # Processing "normal" fields. May include leftover from what we couldn't retrieve as a secret. 
+ # (Typically when Juju3 Requires meets Juju2 Provider) + if normal_fields: + result.update( + self._fetch_relation_data_without_secrets(component, relation, list(normal_fields)) + ) + return result + + def _update_relation_data_without_secrets( + self, component: Union[Application, Unit], relation: Relation, data: Dict[str, str] + ) -> None: + """Updating databag contents when no secrets are involved.""" + if component not in relation.data or relation.data[component] is None: + return + + if relation: + relation.data[component].update(data) + + def _delete_relation_data_without_secrets( + self, component: Union[Application, Unit], relation: Relation, fields: List[str] + ) -> None: + """Remove databag fields 'fields' from Relation.""" + if component not in relation.data or relation.data[component] is None: + return + + for field in fields: + try: + relation.data[component].pop(field) + except KeyError: + logger.debug( + "Non-existing field '%s' was attempted to be removed from the databag (relation ID: %s)", + str(field), + str(relation.id), + ) + pass + + # Public interface methods + # Handling Relation Fields seamlessly, regardless if in databag or a Juju Secret + + def as_dict(self, relation_id: int) -> UserDict: + """Dict behavior representation of the Abstract Data.""" + return DataDict(self, relation_id) + + def get_relation(self, relation_name, relation_id) -> Relation: + """Safe way of retrieving a relation.""" + relation = self._model.get_relation(relation_name, relation_id) + + if not relation: + raise DataInterfacesError( + "Relation %s %s couldn't be retrieved", relation_name, relation_id + ) + + return relation + + def fetch_relation_data( + self, + relation_ids: Optional[List[int]] = None, + fields: Optional[List[str]] = None, + relation_name: Optional[str] = None, + ) -> Dict[int, Dict[str, str]]: + """Retrieves data from relation. + + This function can be used to retrieve data from a relation + in the charm code when outside an event callback. + Function cannot be used in `*-relation-broken` events and will raise an exception. + + Returns: + a dict of the values stored in the relation data bag + for all relation instances (indexed by the relation ID). + """ + if not relation_name: + relation_name = self.relation_name + + relations = [] + if relation_ids: + relations = [ + self.get_relation(relation_name, relation_id) for relation_id in relation_ids + ] + else: + relations = self.relations + + data = {} + for relation in relations: + if not relation_ids or (relation_ids and relation.id in relation_ids): + data[relation.id] = self._fetch_specific_relation_data(relation, fields) + return data + + def fetch_relation_field( + self, relation_id: int, field: str, relation_name: Optional[str] = None + ) -> Optional[str]: + """Get a single field from the relation data.""" + return ( + self.fetch_relation_data([relation_id], [field], relation_name) + .get(relation_id, {}) + .get(field) + ) + + def fetch_my_relation_data( + self, + relation_ids: Optional[List[int]] = None, + fields: Optional[List[str]] = None, + relation_name: Optional[str] = None, + ) -> Optional[Dict[int, Dict[str, str]]]: + """Fetch data of the 'owner' (or 'this app') side of the relation. 
+ + NOTE: Since only the leader can read the relation's 'this_app'-side + Application databag, the functionality is limited to leaders + """ + if not relation_name: + relation_name = self.relation_name + + relations = [] + if relation_ids: + relations = [ + self.get_relation(relation_name, relation_id) for relation_id in relation_ids + ] + else: + relations = self.relations + + data = {} + for relation in relations: + if not relation_ids or relation.id in relation_ids: + data[relation.id] = self._fetch_my_specific_relation_data(relation, fields) + return data + + def fetch_my_relation_field( + self, relation_id: int, field: str, relation_name: Optional[str] = None + ) -> Optional[str]: + """Get a single field from the relation data -- owner side. + + NOTE: Since only the leader can read the relation's 'this_app'-side + Application databag, the functionality is limited to leaders + """ + if relation_data := self.fetch_my_relation_data([relation_id], [field], relation_name): + return relation_data.get(relation_id, {}).get(field) + + @leader_only + def update_relation_data(self, relation_id: int, data: dict) -> None: + """Update the data within the relation.""" + relation_name = self.relation_name + relation = self.get_relation(relation_name, relation_id) + return self._update_relation_data(relation, data) + + @leader_only + def delete_relation_data(self, relation_id: int, fields: List[str]) -> None: + """Remove field from the relation.""" + relation_name = self.relation_name + relation = self.get_relation(relation_name, relation_id) + return self._delete_relation_data(relation, fields) + + +class EventHandlers(Object): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: Data, unique_key: str = ""): + """Manager of base client relations.""" + if not unique_key: + unique_key = relation_data.relation_name + super().__init__(charm, unique_key) + + self.charm = charm + self.relation_data = relation_data + + self.framework.observe( + charm.on[self.relation_data.relation_name].relation_changed, + self._on_relation_changed_event, + ) + + def _diff(self, event: RelationChangedEvent) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. 
+ """ + return diff(event, self.relation_data.data_component) + + @abstractmethod + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation data has changed.""" + raise NotImplementedError + + +# Base ProviderData and RequiresData + + +class ProviderData(Data): + """Base provides-side of the data products relation.""" + + def __init__( + self, + model: Model, + relation_name: str, + ) -> None: + super().__init__(model, relation_name) + self.data_component = self.local_app + + # Private methods handling secrets + + @juju_secrets_only + def _add_relation_secret( + self, + relation: Relation, + group_mapping: SecretGroup, + secret_fields: Set[str], + data: Dict[str, str], + uri_to_databag=True, + ) -> bool: + """Add a new Juju Secret that will be registered in the relation databag.""" + secret_field = self._generate_secret_field_name(group_mapping) + if uri_to_databag and relation.data[self.component].get(secret_field): + logging.error("Secret for relation %s already exists, not adding again", relation.id) + return False + + content = self._content_for_secret_group(data, secret_fields, group_mapping) + + label = self._generate_secret_label(self.relation_name, relation.id, group_mapping) + secret = self.secrets.add(label, content, relation) + + # According to lint we may not have a Secret ID + if uri_to_databag and secret.meta and secret.meta.id: + relation.data[self.component][secret_field] = secret.meta.id + + # Return the content that was added + return True + + @juju_secrets_only + def _update_relation_secret( + self, + relation: Relation, + group_mapping: SecretGroup, + secret_fields: Set[str], + data: Dict[str, str], + ) -> bool: + """Update the contents of an existing Juju Secret, referred in the relation databag.""" + secret = self._get_relation_secret(relation.id, group_mapping) + + if not secret: + logging.error("Can't update secret for relation %s", relation.id) + return False + + content = self._content_for_secret_group(data, secret_fields, group_mapping) + + old_content = secret.get_content() + full_content = copy.deepcopy(old_content) + full_content.update(content) + secret.set_content(full_content) + + # Return True on success + return True + + def _add_or_update_relation_secrets( + self, + relation: Relation, + group: SecretGroup, + secret_fields: Set[str], + data: Dict[str, str], + uri_to_databag=True, + ) -> bool: + """Update contents for Secret group. 
If the Secret doesn't exist, create it.""" + if self._get_relation_secret(relation.id, group): + return self._update_relation_secret(relation, group, secret_fields, data) + else: + return self._add_relation_secret(relation, group, secret_fields, data, uri_to_databag) + + @juju_secrets_only + def _delete_relation_secret( + self, relation: Relation, group: SecretGroup, secret_fields: List[str], fields: List[str] + ) -> bool: + """Update the contents of an existing Juju Secret, referred in the relation databag.""" + secret = self._get_relation_secret(relation.id, group) + + if not secret: + logging.error("Can't delete secret for relation %s", str(relation.id)) + return False + + old_content = secret.get_content() + new_content = copy.deepcopy(old_content) + for field in fields: + try: + new_content.pop(field) + except KeyError: + logging.debug( + "Non-existing secret was attempted to be removed %s, %s", + str(relation.id), + str(field), + ) + return False + + # Remove secret from the relation if it's fully gone + if not new_content: + field = self._generate_secret_field_name(group) + try: + relation.data[self.component].pop(field) + except KeyError: + pass + label = self._generate_secret_label(self.relation_name, relation.id, group) + self.secrets.remove(label) + else: + secret.set_content(new_content) + + # Return the content that was removed + return True + + # Mandatory internal overrides + + @juju_secrets_only + def _get_relation_secret( + self, relation_id: int, group_mapping: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[CachedSecret]: + """Retrieve a Juju Secret that's been stored in the relation databag.""" + if not relation_name: + relation_name = self.relation_name + + label = self._generate_secret_label(relation_name, relation_id, group_mapping) + if secret := self.secrets.get(label): + return secret + + relation = self._model.get_relation(relation_name, relation_id) + if not relation: + return + + secret_field = self._generate_secret_field_name(group_mapping) + if secret_uri := relation.data[self.local_app].get(secret_field): + return self.secrets.get(label, secret_uri) + + def _fetch_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetching relation data for Provider. 
+ + NOTE: Since all secret fields are in the Provider side of the databag, we don't need to worry about that + """ + if not relation.app: + return {} + + return self._fetch_relation_data_without_secrets(relation.app, relation, fields) + + def _fetch_my_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> dict: + """Fetching our own relation data.""" + secret_fields = None + if relation.app: + secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS) + + return self._fetch_relation_data_with_secrets( + self.local_app, + secret_fields, + relation, + fields, + ) + + def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None: + """Set values for fields not caring whether it's a secret or not.""" + req_secret_fields = [] + if relation.app: + req_secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS) + + _, normal_fields = self._process_secret_fields( + relation, + req_secret_fields, + list(data), + self._add_or_update_relation_secrets, + data=data, + ) + + normal_content = {k: v for k, v in data.items() if k in normal_fields} + self._update_relation_data_without_secrets(self.local_app, relation, normal_content) + + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Delete fields from the Relation not caring whether it's a secret or not.""" + req_secret_fields = [] + if relation.app: + req_secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS) + + _, normal_fields = self._process_secret_fields( + relation, req_secret_fields, fields, self._delete_relation_secret, fields=fields + ) + self._delete_relation_data_without_secrets(self.local_app, relation, list(normal_fields)) + + # Public methods - "native" + + def set_credentials(self, relation_id: int, username: str, password: str) -> None: + """Set credentials. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + username: user that was created. + password: password of the created user. + """ + self.update_relation_data(relation_id, {"username": username, "password": password}) + + def set_tls(self, relation_id: int, tls: str) -> None: + """Set whether TLS is enabled. + + Args: + relation_id: the identifier for a particular relation. + tls: whether tls is enabled (True or False). + """ + self.update_relation_data(relation_id, {"tls": tls}) + + def set_tls_ca(self, relation_id: int, tls_ca: str) -> None: + """Set the TLS CA in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + tls_ca: TLS certification authority. 
+ """ + self.update_relation_data(relation_id, {"tls-ca": tls_ca}) + + # Public functions -- inherited + + fetch_my_relation_data = leader_only(Data.fetch_my_relation_data) + fetch_my_relation_field = leader_only(Data.fetch_my_relation_field) + + +class RequirerData(Data): + """Requirer-side of the relation.""" + + SECRET_FIELDS = ["username", "password", "tls", "tls-ca", "uris"] + + def __init__( + self, + model, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ): + """Manager of base client relations.""" + super().__init__(model, relation_name) + self.extra_user_roles = extra_user_roles + self._secret_fields = list(self.SECRET_FIELDS) + if additional_secret_fields: + self._secret_fields += additional_secret_fields + self.data_component = self.local_unit + + @property + def secret_fields(self) -> Optional[List[str]]: + """Local access to secrets field, in case they are being used.""" + if self.secrets_enabled: + return self._secret_fields + + # Internal helper functions + + def _register_secret_to_relation( + self, relation_name: str, relation_id: int, secret_id: str, group: SecretGroup + ): + """Fetch secrets and apply local label on them. + + [MAGIC HERE] + If we fetch a secret using get_secret(id=, label=), + then will be "stuck" on the Secret object, whenever it may + appear (i.e. as an event attribute, or fetched manually) on future occasions. + + This will allow us to uniquely identify the secret on Provider side (typically on + 'secret-changed' events), and map it to the corresponding relation. + """ + label = self._generate_secret_label(relation_name, relation_id, group) + + # Fetching the Secret's meta information ensuring that it's locally getting registered with + CachedSecret(self._model, self.component, label, secret_id).meta + + def _register_secrets_to_relation(self, relation: Relation, params_name_list: List[str]): + """Make sure that secrets of the provided list are locally 'registered' from the databag. + + More on 'locally registered' magic is described in _register_secret_to_relation() method + """ + if not relation.app: + return + + for group in SECRET_GROUPS.groups(): + secret_field = self._generate_secret_field_name(group) + if secret_field in params_name_list: + if secret_uri := relation.data[relation.app].get(secret_field): + self._register_secret_to_relation( + relation.name, relation.id, secret_uri, group + ) + + def _is_resource_created_for_relation(self, relation: Relation) -> bool: + if not relation.app: + return False + + data = self.fetch_relation_data([relation.id], ["username", "password"]).get( + relation.id, {} + ) + return bool(data.get("username")) and bool(data.get("password")) + + def is_resource_created(self, relation_id: Optional[int] = None) -> bool: + """Check if the resource has been created. + + This function can be used to check if the Provider answered with data in the charm code + when outside an event callback. 
+ + Args: + relation_id (int, optional): When provided the check is done only for the relation id + provided, otherwise the check is done for all relations + + Returns: + True or False + + Raises: + IndexError: If relation_id is provided but that relation does not exist + """ + if relation_id is not None: + try: + relation = [relation for relation in self.relations if relation.id == relation_id][ + 0 + ] + return self._is_resource_created_for_relation(relation) + except IndexError: + raise IndexError(f"relation id {relation_id} cannot be accessed") + else: + return ( + all( + self._is_resource_created_for_relation(relation) for relation in self.relations + ) + if self.relations + else False + ) + + # Mandatory internal overrides + + @juju_secrets_only + def _get_relation_secret( + self, relation_id: int, group: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[CachedSecret]: + """Retrieve a Juju Secret that's been stored in the relation databag.""" + if not relation_name: + relation_name = self.relation_name + + label = self._generate_secret_label(relation_name, relation_id, group) + return self.secrets.get(label) + + def _fetch_specific_relation_data( + self, relation, fields: Optional[List[str]] = None + ) -> Dict[str, str]: + """Fetching Requirer data -- that may include secrets.""" + if not relation.app: + return {} + return self._fetch_relation_data_with_secrets( + relation.app, self.secret_fields, relation, fields + ) + + def _fetch_my_specific_relation_data(self, relation, fields: Optional[List[str]]) -> dict: + """Fetching our own relation data.""" + return self._fetch_relation_data_without_secrets(self.local_app, relation, fields) + + def _update_relation_data(self, relation: Relation, data: dict) -> None: + """Updates a set of key-value pairs in the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation: the particular relation. + data: dict containing the key-value pairs + that should be updated in the relation. + """ + return self._update_relation_data_without_secrets(self.local_app, relation, data) + + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Deletes a set of fields from the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation: the particular relation. + fields: list containing the field names that should be removed from the relation. 
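+
+        As a short illustrative sketch of the public counterpart (assuming a
+        DatabaseProvides instance stored as self.provided), previously published
+        TLS fields could be removed with:
+
+        ```python
+        self.provided.delete_relation_data(event.relation.id, ["tls", "tls-ca"])
+        ```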
+ """ + return self._delete_relation_data_without_secrets(self.local_app, relation, fields) + + # Public functions -- inherited + + fetch_my_relation_data = leader_only(Data.fetch_my_relation_data) + fetch_my_relation_field = leader_only(Data.fetch_my_relation_field) + + +class RequirerEventHandlers(EventHandlers): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: RequirerData, unique_key: str = ""): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + + self.framework.observe( + self.charm.on[relation_data.relation_name].relation_created, + self._on_relation_created_event, + ) + self.framework.observe( + charm.on.secret_changed, + self._on_secret_changed_event, + ) + + # Event handlers + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the relation is created.""" + if not self.relation_data.local_unit.is_leader(): + return + + if self.relation_data.secret_fields: # pyright: ignore [reportAttributeAccessIssue] + set_encoded_field( + event.relation, + self.relation_data.component, + REQ_SECRET_FIELDS, + self.relation_data.secret_fields, # pyright: ignore [reportAttributeAccessIssue] + ) + + @abstractmethod + def _on_secret_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation data has changed.""" + raise NotImplementedError + + +################################################################################ +# Peer Relation Data +################################################################################ + + +class DataPeerData(RequirerData, ProviderData): + """Represents peer relations data.""" + + SECRET_FIELDS = [] + SECRET_FIELD_NAME = "internal_secret" + SECRET_LABEL_MAP = {} + + def __init__( + self, + model, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + ): + """Manager of base client relations.""" + RequirerData.__init__( + self, + model, + relation_name, + extra_user_roles, + additional_secret_fields, + ) + self.secret_field_name = secret_field_name if secret_field_name else self.SECRET_FIELD_NAME + self.deleted_label = deleted_label + self._secret_label_map = {} + # Secrets that are being dynamically added within the scope of this event handler run + self._new_secrets = [] + self._additional_secret_group_mapping = additional_secret_group_mapping + + for group, fields in additional_secret_group_mapping.items(): + if group not in SECRET_GROUPS.groups(): + setattr(SECRET_GROUPS, group, group) + for field in fields: + secret_group = SECRET_GROUPS.get_group(group) + internal_field = self._field_to_internal_name(field, secret_group) + self._secret_label_map.setdefault(group, []).append(internal_field) + self._secret_fields.append(internal_field) + + @property + def scope(self) -> Optional[Scope]: + """Turn component information into Scope.""" + if isinstance(self.component, Application): + return Scope.APP + if isinstance(self.component, Unit): + return Scope.UNIT + + @property + def secret_label_map(self) -> Dict[str, str]: + """Property storing secret mappings.""" + return self._secret_label_map + + @property + def static_secret_fields(self) -> List[str]: + """Re-definition of the property in a way that dynamically extended list is retrieved.""" + return self._secret_fields + + @property + def 
secret_fields(self) -> List[str]: + """Re-definition of the property in a way that dynamically extended list is retrieved.""" + return ( + self.static_secret_fields if self.static_secret_fields else self.current_secret_fields + ) + + @property + def current_secret_fields(self) -> List[str]: + """Helper method to get all currently existing secret fields (added statically or dynamically).""" + if not self.secrets_enabled: + return [] + + if len(self._model.relations[self.relation_name]) > 1: + raise ValueError(f"More than one peer relation on {self.relation_name}") + + relation = self._model.relations[self.relation_name][0] + fields = [] + + ignores = [SECRET_GROUPS.get_group("user"), SECRET_GROUPS.get_group("tls")] + for group in SECRET_GROUPS.groups(): + if group in ignores: + continue + if content := self._get_group_secret_contents(relation, group): + fields += list(content.keys()) + return list(set(fields) | set(self._new_secrets)) + + @dynamic_secrets_only + def set_secret( + self, + relation_id: int, + field: str, + value: str, + group_mapping: Optional[SecretGroup] = None, + ) -> None: + """Public interface method to add a Relation Data field specifically as a Juju Secret. + + Args: + relation_id: ID of the relation + field: The secret field that is to be added + value: The string value of the secret + group_mapping: The name of the "secret group", in case the field is to be added to an existing secret + """ + full_field = self._field_to_internal_name(field, group_mapping) + if self.secrets_enabled and full_field not in self.current_secret_fields: + self._new_secrets.append(full_field) + if self._no_group_with_databag(field, full_field): + self.update_relation_data(relation_id, {full_field: value}) + + # Unlike for set_secret(), there's no harm using this operation with static secrets + # The restricion is only added to keep the concept clear + @dynamic_secrets_only + def get_secret( + self, + relation_id: int, + field: str, + group_mapping: Optional[SecretGroup] = None, + ) -> Optional[str]: + """Public interface method to fetch secrets only.""" + full_field = self._field_to_internal_name(field, group_mapping) + if ( + self.secrets_enabled + and full_field not in self.current_secret_fields + and field not in self.current_secret_fields + ): + return + if self._no_group_with_databag(field, full_field): + return self.fetch_my_relation_field(relation_id, full_field) + + @dynamic_secrets_only + def delete_secret( + self, + relation_id: int, + field: str, + group_mapping: Optional[SecretGroup] = None, + ) -> Optional[str]: + """Public interface method to delete secrets only.""" + full_field = self._field_to_internal_name(field, group_mapping) + if self.secrets_enabled and full_field not in self.current_secret_fields: + logger.warning(f"Secret {field} from group {group_mapping} was not found") + return + if self._no_group_with_databag(field, full_field): + self.delete_relation_data(relation_id, [full_field]) + + # Helpers + + @staticmethod + def _field_to_internal_name(field: str, group: Optional[SecretGroup]) -> str: + if not group or group == SECRET_GROUPS.EXTRA: + return field + return f"{field}{GROUP_SEPARATOR}{group}" + + @staticmethod + def _internal_name_to_field(name: str) -> Tuple[str, SecretGroup]: + parts = name.split(GROUP_SEPARATOR) + if not len(parts) > 1: + return (parts[0], SECRET_GROUPS.EXTRA) + secret_group = SECRET_GROUPS.get_group(parts[1]) + if not secret_group: + raise ValueError(f"Invalid secret field {name}") + return (parts[0], secret_group) + + def 
_group_secret_fields(self, secret_fields: List[str]) -> Dict[SecretGroup, List[str]]: + """Helper function to arrange secret mappings under their group. + + NOTE: All unrecognized items end up in the 'extra' secret bucket. + Make sure only secret fields are passed! + """ + secret_fieldnames_grouped = {} + for key in secret_fields: + field, group = self._internal_name_to_field(key) + secret_fieldnames_grouped.setdefault(group, []).append(field) + return secret_fieldnames_grouped + + def _content_for_secret_group( + self, content: Dict[str, str], secret_fields: Set[str], group_mapping: SecretGroup + ) -> Dict[str, str]: + """Select : pairs from input, that belong to this particular Secret group.""" + if group_mapping == SECRET_GROUPS.EXTRA: + return {k: v for k, v in content.items() if k in self.secret_fields} + return { + self._internal_name_to_field(k)[0]: v + for k, v in content.items() + if k in self.secret_fields + } + + # Backwards compatibility + + def _check_deleted_label(self, relation, fields) -> None: + """Helper function for legacy behavior.""" + current_data = self.fetch_my_relation_data([relation.id], fields) + if current_data is not None: + # Check if the secret we wanna delete actually exists + # Given the "deleted label", here we can't rely on the default mechanism (i.e. 'key not found') + if non_existent := (set(fields) & set(self.secret_fields)) - set( + current_data.get(relation.id, []) + ): + logger.debug( + "Non-existing secret %s was attempted to be removed.", + ", ".join(non_existent), + ) + + def _remove_secret_from_databag(self, relation, fields: List[str]) -> None: + """For Rolling Upgrades -- when moving from databag to secrets usage. + + Practically what happens here is to remove stuff from the databag that is + to be stored in secrets. + """ + if not self.secret_fields: + return + + secret_fields_passed = set(self.secret_fields) & set(fields) + for field in secret_fields_passed: + if self._fetch_relation_data_without_secrets(self.component, relation, [field]): + self._delete_relation_data_without_secrets(self.component, relation, [field]) + + def _remove_secret_field_name_from_databag(self, relation) -> None: + """Making sure that the old databag URI is gone. + + This action should not be executed more than once. + """ + # Nothing to do if 'internal-secret' is not in the databag + if not (relation.data[self.component].get(self._generate_secret_field_name())): + return + + # Making sure that the secret receives its label + # (This should have happened by the time we get here, rather an extra security measure.) 
+ secret = self._get_relation_secret(relation.id) + + # Either app scope secret with leader executing, or unit scope secret + leader_or_unit_scope = self.component != self.local_app or self.local_unit.is_leader() + if secret and leader_or_unit_scope: + # Databag reference to the secret URI can be removed, now that it's labelled + relation.data[self.component].pop(self._generate_secret_field_name(), None) + + def _previous_labels(self) -> List[str]: + """Generator for legacy secret label names, for backwards compatibility.""" + result = [] + members = [self._model.app.name] + if self.scope: + members.append(self.scope.value) + result.append(f"{'.'.join(members)}") + return result + + def _no_group_with_databag(self, field: str, full_field: str) -> bool: + """Check that no secret group is attempted to be used together with databag.""" + if not self.secrets_enabled and full_field != field: + logger.error( + f"Can't access {full_field}: no secrets available (i.e. no secret groups either)." + ) + return False + return True + + # Event handlers + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + pass + + def _on_secret_changed_event(self, event: SecretChangedEvent) -> None: + """Event emitted when the secret has changed.""" + pass + + # Overrides of Relation Data handling functions + + def _generate_secret_label( + self, relation_name: str, relation_id: int, group_mapping: SecretGroup + ) -> str: + members = [relation_name, self._model.app.name] + if self.scope: + members.append(self.scope.value) + if group_mapping != SECRET_GROUPS.EXTRA: + members.append(group_mapping) + return f"{'.'.join(members)}" + + def _generate_secret_field_name(self, group_mapping: SecretGroup = SECRET_GROUPS.EXTRA) -> str: + """Generate unique group_mappings for secrets within a relation context.""" + return f"{self.secret_field_name}" + + @juju_secrets_only + def _get_relation_secret( + self, + relation_id: int, + group_mapping: SecretGroup = SECRET_GROUPS.EXTRA, + relation_name: Optional[str] = None, + ) -> Optional[CachedSecret]: + """Retrieve a Juju Secret specifically for peer relations. + + In case this code may be executed within a rolling upgrade, and we may need to + migrate secrets from the databag to labels, we make sure to stick the correct + label on the secret, and clean up the local databag. 
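+
+        For orientation, a minimal usage sketch of the peer-data wrapper built on
+        top of this mechanism (relation and field names are placeholders, and a
+        Juju version with secrets support is assumed):
+
+        ```python
+        from charms.data_platform_libs.v0.data_interfaces import DataPeer
+
+        # In the charm's __init__:
+        self.peer_data = DataPeer(self, relation_name="charm-peer")
+
+        # Store and read back an application-level secret field:
+        relation = self.model.get_relation("charm-peer")
+        self.peer_data.set_secret(relation.id, "admin-password", "example-password")
+        password = self.peer_data.get_secret(relation.id, "admin-password")
+        ```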
+ """ + if not relation_name: + relation_name = self.relation_name + + relation = self._model.get_relation(relation_name, relation_id) + if not relation: + return + + label = self._generate_secret_label(relation_name, relation_id, group_mapping) + secret_uri = relation.data[self.component].get(self._generate_secret_field_name(), None) + + # URI or legacy label is only to applied when moving single legacy secret to a (new) label + if group_mapping == SECRET_GROUPS.EXTRA: + # Fetching the secret with fallback to URI (in case label is not yet known) + # Label would we "stuck" on the secret in case it is found + return self.secrets.get(label, secret_uri, legacy_labels=self._previous_labels()) + return self.secrets.get(label) + + def _get_group_secret_contents( + self, + relation: Relation, + group: SecretGroup, + secret_fields: Union[Set[str], List[str]] = [], + ) -> Dict[str, str]: + """Helper function to retrieve collective, requested contents of a secret.""" + secret_fields = [self._internal_name_to_field(k)[0] for k in secret_fields] + result = super()._get_group_secret_contents(relation, group, secret_fields) + if self.deleted_label: + result = {key: result[key] for key in result if result[key] != self.deleted_label} + if self._additional_secret_group_mapping: + return {self._field_to_internal_name(key, group): result[key] for key in result} + return result + + @either_static_or_dynamic_secrets + def _fetch_my_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetch data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app.""" + return self._fetch_relation_data_with_secrets( + self.component, self.secret_fields, relation, fields + ) + + @either_static_or_dynamic_secrets + def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None: + """Update data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app.""" + self._remove_secret_from_databag(relation, list(data.keys())) + _, normal_fields = self._process_secret_fields( + relation, + self.secret_fields, + list(data), + self._add_or_update_relation_secrets, + data=data, + uri_to_databag=False, + ) + self._remove_secret_field_name_from_databag(relation) + + normal_content = {k: v for k, v in data.items() if k in normal_fields} + self._update_relation_data_without_secrets(self.component, relation, normal_content) + + @either_static_or_dynamic_secrets + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Delete data available (directily or indirectly -- i.e. 
secrets) from the relation for owner/this_app.""" + if self.secret_fields and self.deleted_label: + # Legacy, backwards compatibility + self._check_deleted_label(relation, fields) + + _, normal_fields = self._process_secret_fields( + relation, + self.secret_fields, + fields, + self._update_relation_secret, + data={field: self.deleted_label for field in fields}, + ) + else: + _, normal_fields = self._process_secret_fields( + relation, self.secret_fields, fields, self._delete_relation_secret, fields=fields + ) + self._delete_relation_data_without_secrets(self.component, relation, list(normal_fields)) + + def fetch_relation_data( + self, + relation_ids: Optional[List[int]] = None, + fields: Optional[List[str]] = None, + relation_name: Optional[str] = None, + ) -> Dict[int, Dict[str, str]]: + """This method makes no sense for a Peer Relation.""" + raise NotImplementedError( + "Peer Relation only supports 'self-side' fetch methods: " + "fetch_my_relation_data() and fetch_my_relation_field()" + ) + + def fetch_relation_field( + self, relation_id: int, field: str, relation_name: Optional[str] = None + ) -> Optional[str]: + """This method makes no sense for a Peer Relation.""" + raise NotImplementedError( + "Peer Relation only supports 'self-side' fetch methods: " + "fetch_my_relation_data() and fetch_my_relation_field()" + ) + + # Public functions -- inherited + + fetch_my_relation_data = Data.fetch_my_relation_data + fetch_my_relation_field = Data.fetch_my_relation_field + + +class DataPeerEventHandlers(RequirerEventHandlers): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: RequirerData, unique_key: str = ""): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + pass + + def _on_secret_changed_event(self, event: SecretChangedEvent) -> None: + """Event emitted when the secret has changed.""" + pass + + +class DataPeer(DataPeerData, DataPeerEventHandlers): + """Represents peer relations.""" + + def __init__( + self, + charm, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + unique_key: str = "", + ): + DataPeerData.__init__( + self, + charm.model, + relation_name, + extra_user_roles, + additional_secret_fields, + additional_secret_group_mapping, + secret_field_name, + deleted_label, + ) + DataPeerEventHandlers.__init__(self, charm, self, unique_key) + + +class DataPeerUnitData(DataPeerData): + """Unit data abstraction representation.""" + + SCOPE = Scope.UNIT + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + +class DataPeerUnit(DataPeerUnitData, DataPeerEventHandlers): + """Unit databag representation.""" + + def __init__( + self, + charm, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + unique_key: str = "", + ): + DataPeerData.__init__( + self, + charm.model, + relation_name, + extra_user_roles, + additional_secret_fields, + additional_secret_group_mapping, + secret_field_name, + deleted_label, + ) + DataPeerEventHandlers.__init__(self, 
charm, self, unique_key) + + +class DataPeerOtherUnitData(DataPeerUnitData): + """Unit data abstraction representation.""" + + def __init__(self, unit: Unit, *args, **kwargs): + super().__init__(*args, **kwargs) + self.local_unit = unit + self.component = unit + + def update_relation_data(self, relation_id: int, data: dict) -> None: + """This method makes no sense for a Other Peer Relation.""" + raise NotImplementedError("It's not possible to update data of another unit.") + + def delete_relation_data(self, relation_id: int, fields: List[str]) -> None: + """This method makes no sense for a Other Peer Relation.""" + raise NotImplementedError("It's not possible to delete data of another unit.") + + +class DataPeerOtherUnitEventHandlers(DataPeerEventHandlers): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: DataPeerUnitData): + """Manager of base client relations.""" + unique_key = f"{relation_data.relation_name}-{relation_data.local_unit.name}" + super().__init__(charm, relation_data, unique_key=unique_key) + + +class DataPeerOtherUnit(DataPeerOtherUnitData, DataPeerOtherUnitEventHandlers): + """Unit databag representation for another unit than the executor.""" + + def __init__( + self, + unit: Unit, + charm: CharmBase, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + ): + DataPeerOtherUnitData.__init__( + self, + unit, + charm.model, + relation_name, + extra_user_roles, + additional_secret_fields, + additional_secret_group_mapping, + secret_field_name, + deleted_label, + ) + DataPeerOtherUnitEventHandlers.__init__(self, charm, self) + + +################################################################################ +# Cross-charm Relatoins Data Handling and Evenets +################################################################################ + +# Generic events + + +class ExtraRoleEvent(RelationEvent): + """Base class for data events.""" + + @property + def extra_user_roles(self) -> Optional[str]: + """Returns the extra user roles that were requested.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("extra-user-roles") + + +class RelationEventWithSecret(RelationEvent): + """Base class for Relation Events that need to handle secrets.""" + + @property + def _secrets(self) -> dict: + """Caching secrets to avoid fetching them each time a field is referrd. + + DON'T USE the encapsulated helper variable outside of this function + """ + if not hasattr(self, "_cached_secrets"): + self._cached_secrets = {} + return self._cached_secrets + + def _get_secret(self, group) -> Optional[Dict[str, str]]: + """Retrieving secrets.""" + if not self.app: + return + if not self._secrets.get(group): + self._secrets[group] = None + secret_field = f"{PROV_SECRET_PREFIX}{group}" + if secret_uri := self.relation.data[self.app].get(secret_field): + secret = self.framework.model.get_secret(id=secret_uri) + self._secrets[group] = secret.get_content() + return self._secrets[group] + + @property + def secrets_enabled(self): + """Is this Juju version allowing for Secrets usage?""" + return JujuVersion.from_environ().has_secrets + + +class AuthenticationEvent(RelationEventWithSecret): + """Base class for authentication fields for events. 
+ + The amount of logic added here is not ideal -- but this was the only way to preserve + the interface when moving to Juju Secrets + """ + + @property + def username(self) -> Optional[str]: + """Returns the created username.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("user") + if secret: + return secret.get("username") + + return self.relation.data[self.relation.app].get("username") + + @property + def password(self) -> Optional[str]: + """Returns the password for the created user.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("user") + if secret: + return secret.get("password") + + return self.relation.data[self.relation.app].get("password") + + @property + def tls(self) -> Optional[str]: + """Returns whether TLS is configured.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("tls") + if secret: + return secret.get("tls") + + return self.relation.data[self.relation.app].get("tls") + + @property + def tls_ca(self) -> Optional[str]: + """Returns TLS CA.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("tls") + if secret: + return secret.get("tls-ca") + + return self.relation.data[self.relation.app].get("tls-ca") + + +# Database related events and fields + + +class DatabaseProvidesEvent(RelationEvent): + """Base class for database events.""" + + @property + def database(self) -> Optional[str]: + """Returns the database that was requested.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("database") + + +class DatabaseRequestedEvent(DatabaseProvidesEvent, ExtraRoleEvent): + """Event emitted when a new database is requested for use on this relation.""" + + @property + def external_node_connectivity(self) -> bool: + """Returns the requested external_node_connectivity field.""" + if not self.relation.app: + return False + + return ( + self.relation.data[self.relation.app].get("external-node-connectivity", "false") + == "true" + ) + + +class DatabaseProvidesEvents(CharmEvents): + """Database events. + + This class defines the events that the database can emit. + """ + + database_requested = EventSource(DatabaseRequestedEvent) + + +class DatabaseRequiresEvent(RelationEventWithSecret): + """Base class for database events.""" + + @property + def database(self) -> Optional[str]: + """Returns the database name.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("database") + + @property + def endpoints(self) -> Optional[str]: + """Returns a comma separated list of read/write endpoints. + + In VM charms, this is the primary's address. + In kubernetes charms, this is the service to the primary pod. + """ + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("endpoints") + + @property + def read_only_endpoints(self) -> Optional[str]: + """Returns a comma separated list of read only endpoints. + + In VM charms, this is the address of all the secondary instances. + In kubernetes charms, this is the service to all replica pod instances. + """ + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("read-only-endpoints") + + @property + def replset(self) -> Optional[str]: + """Returns the replicaset name. + + MongoDB only. 
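+
+        Example of a requirer consuming these fields (an illustrative sketch;
+        the relation and database names are placeholders):
+
+        ```python
+        from charms.data_platform_libs.v0.data_interfaces import (
+            DatabaseCreatedEvent,
+            DatabaseRequires,
+        )
+
+        # In the requirer charm's __init__:
+        self.database = DatabaseRequires(self, "database", database_name="mydb")
+        self.framework.observe(self.database.on.database_created, self._on_database_created)
+
+        def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
+            # For MongoDB, the replica set name and URIs arrive with the credentials.
+            credentials = (event.username, event.password)
+            replset = event.replset
+            uris = event.uris
+        ```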
+ """ + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("replset") + + @property + def uris(self) -> Optional[str]: + """Returns the connection URIs. + + MongoDB, Redis, OpenSearch. + """ + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("user") + if secret: + return secret.get("uris") + + return self.relation.data[self.relation.app].get("uris") + + @property + def version(self) -> Optional[str]: + """Returns the version of the database. + + Version as informed by the database daemon. + """ + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("version") + + +class DatabaseCreatedEvent(AuthenticationEvent, DatabaseRequiresEvent): + """Event emitted when a new database is created for use on this relation.""" + + +class DatabaseEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent): + """Event emitted when the read/write endpoints are changed.""" + + +class DatabaseReadOnlyEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent): + """Event emitted when the read only endpoints are changed.""" + + +class DatabaseRequiresEvents(CharmEvents): + """Database events. + + This class defines the events that the database can emit. + """ + + database_created = EventSource(DatabaseCreatedEvent) + endpoints_changed = EventSource(DatabaseEndpointsChangedEvent) + read_only_endpoints_changed = EventSource(DatabaseReadOnlyEndpointsChangedEvent) + + +# Database Provider and Requires + + +class DatabaseProviderData(ProviderData): + """Provider-side data of the database relations.""" + + def __init__(self, model: Model, relation_name: str) -> None: + super().__init__(model, relation_name) + + def set_database(self, relation_id: int, database_name: str) -> None: + """Set database name. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + database_name: database name. + """ + self.update_relation_data(relation_id, {"database": database_name}) + + def set_endpoints(self, relation_id: int, connection_strings: str) -> None: + """Set database primary connections. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + In VM charms, only the primary's address should be passed as an endpoint. + In kubernetes charms, the service endpoint to the primary pod should be + passed as an endpoint. + + Args: + relation_id: the identifier for a particular relation. + connection_strings: database hosts and ports comma separated list. + """ + self.update_relation_data(relation_id, {"endpoints": connection_strings}) + + def set_read_only_endpoints(self, relation_id: int, connection_strings: str) -> None: + """Set database replicas connection strings. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + connection_strings: database hosts and ports comma separated list. + """ + self.update_relation_data(relation_id, {"read-only-endpoints": connection_strings}) + + def set_replset(self, relation_id: int, replset: str) -> None: + """Set replica set name in the application relation databag. + + MongoDB only. + + Args: + relation_id: the identifier for a particular relation. + replset: replica set name. 
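+
+        Example (an illustrative sketch; assumes a DatabaseProvides instance
+        stored as self.provided and placeholder values):
+
+        ```python
+        # In a MongoDB-flavoured provider's database_requested handler (leader only):
+        self.provided.set_replset(event.relation.id, "example-replset")
+        self.provided.set_uris(
+            event.relation.id,
+            "mongodb://example-user:example-pw@10.1.2.3:27017/mydb",
+        )
+        self.provided.set_version(event.relation.id, "6.0")
+        ```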
+ """ + self.update_relation_data(relation_id, {"replset": replset}) + + def set_uris(self, relation_id: int, uris: str) -> None: + """Set the database connection URIs in the application relation databag. + + MongoDB, Redis, and OpenSearch only. + + Args: + relation_id: the identifier for a particular relation. + uris: connection URIs. + """ + self.update_relation_data(relation_id, {"uris": uris}) + + def set_version(self, relation_id: int, version: str) -> None: + """Set the database version in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + version: database version. + """ + self.update_relation_data(relation_id, {"version": version}) + + def set_subordinated(self, relation_id: int) -> None: + """Raises the subordinated flag in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + """ + self.update_relation_data(relation_id, {"subordinated": "true"}) + + +class DatabaseProviderEventHandlers(EventHandlers): + """Provider-side of the database relation handlers.""" + + on = DatabaseProvidesEvents() # pyright: ignore [reportAssignmentType] + + def __init__( + self, charm: CharmBase, relation_data: DatabaseProviderData, unique_key: str = "" + ): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + # Just to calm down pyright, it can't parse that the same type is being used in the super() call above + self.relation_data = relation_data + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Leader only + if not self.relation_data.local_unit.is_leader(): + return + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Emit a database requested event if the setup key (database name and optional + # extra user roles) was added to the relation databag by the application. + if "database" in diff.added: + getattr(self.on, "database_requested").emit( + event.relation, app=event.app, unit=event.unit + ) + + +class DatabaseProvides(DatabaseProviderData, DatabaseProviderEventHandlers): + """Provider-side of the database relations.""" + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + DatabaseProviderData.__init__(self, charm.model, relation_name) + DatabaseProviderEventHandlers.__init__(self, charm, self) + + +class DatabaseRequirerData(RequirerData): + """Requirer-side of the database relation.""" + + def __init__( + self, + model: Model, + relation_name: str, + database_name: str, + extra_user_roles: Optional[str] = None, + relations_aliases: Optional[List[str]] = None, + additional_secret_fields: Optional[List[str]] = [], + external_node_connectivity: bool = False, + ): + """Manager of database client relations.""" + super().__init__(model, relation_name, extra_user_roles, additional_secret_fields) + self.database = database_name + self.relations_aliases = relations_aliases + self.external_node_connectivity = external_node_connectivity + + def is_postgresql_plugin_enabled(self, plugin: str, relation_index: int = 0) -> bool: + """Returns whether a plugin is enabled in the database. + + Args: + plugin: name of the plugin to check. + relation_index: optional relation index to check the database + (default: 0 - first relation). + + PostgreSQL only. + """ + # Psycopg 3 is imported locally to avoid the need of its package installation + # when relating to a database charm other than PostgreSQL. 
+ import psycopg + + # Return False if no relation is established. + if len(self.relations) == 0: + return False + + relation_id = self.relations[relation_index].id + host = self.fetch_relation_field(relation_id, "endpoints") + + # Return False if there is no endpoint available. + if host is None: + return False + + host = host.split(":")[0] + + content = self.fetch_relation_data([relation_id], ["username", "password"]).get( + relation_id, {} + ) + user = content.get("username") + password = content.get("password") + + connection_string = ( + f"host='{host}' dbname='{self.database}' user='{user}' password='{password}'" + ) + try: + with psycopg.connect(connection_string) as connection: + with connection.cursor() as cursor: + cursor.execute( + "SELECT TRUE FROM pg_extension WHERE extname=%s::text;", (plugin,) + ) + return cursor.fetchone() is not None + except psycopg.Error as e: + logger.exception( + f"failed to check whether {plugin} plugin is enabled in the database: %s", str(e) + ) + return False + + +class DatabaseRequirerEventHandlers(RequirerEventHandlers): + """Requires-side of the relation.""" + + on = DatabaseRequiresEvents() # pyright: ignore [reportAssignmentType] + + def __init__( + self, charm: CharmBase, relation_data: DatabaseRequirerData, unique_key: str = "" + ): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + # Define custom event names for each alias. + if self.relation_data.relations_aliases: + # Ensure the number of aliases does not exceed the maximum + # of connections allowed in the specific relation. + relation_connection_limit = self.charm.meta.requires[ + self.relation_data.relation_name + ].limit + if len(self.relation_data.relations_aliases) != relation_connection_limit: + raise ValueError( + f"The number of aliases must match the maximum number of connections allowed in the relation. " + f"Expected {relation_connection_limit}, got {len(self.relation_data.relations_aliases)}" + ) + + if self.relation_data.relations_aliases: + for relation_alias in self.relation_data.relations_aliases: + self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent) + self.on.define_event( + f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent + ) + self.on.define_event( + f"{relation_alias}_read_only_endpoints_changed", + DatabaseReadOnlyEndpointsChangedEvent, + ) + + def _on_secret_changed_event(self, event: SecretChangedEvent): + """Event notifying about a new value of a secret.""" + pass + + def _assign_relation_alias(self, relation_id: int) -> None: + """Assigns an alias to a relation. + + This function writes in the unit data bag. + + Args: + relation_id: the identifier for a particular relation. + """ + # If no aliases were provided, return immediately. + if not self.relation_data.relations_aliases: + return + + # Return if an alias was already assigned to this relation + # (like when there are more than one unit joining the relation). + relation = self.charm.model.get_relation(self.relation_data.relation_name, relation_id) + if relation and relation.data[self.relation_data.local_unit].get("alias"): + return + + # Retrieve the available aliases (the ones that weren't assigned to any relation). 
+ available_aliases = self.relation_data.relations_aliases[:] + for relation in self.charm.model.relations[self.relation_data.relation_name]: + alias = relation.data[self.relation_data.local_unit].get("alias") + if alias: + logger.debug("Alias %s was already assigned to relation %d", alias, relation.id) + available_aliases.remove(alias) + + # Set the alias in the unit relation databag of the specific relation. + relation = self.charm.model.get_relation(self.relation_data.relation_name, relation_id) + if relation: + relation.data[self.relation_data.local_unit].update({"alias": available_aliases[0]}) + + # We need to set relation alias also on the application level so, + # it will be accessible in show-unit juju command, executed for a consumer application unit + if self.relation_data.local_unit.is_leader(): + self.relation_data.update_relation_data(relation_id, {"alias": available_aliases[0]}) + + def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None: + """Emit an aliased event to a particular relation if it has an alias. + + Args: + event: the relation changed event that was received. + event_name: the name of the event to emit. + """ + alias = self._get_relation_alias(event.relation.id) + if alias: + getattr(self.on, f"{alias}_{event_name}").emit( + event.relation, app=event.app, unit=event.unit + ) + + def _get_relation_alias(self, relation_id: int) -> Optional[str]: + """Returns the relation alias. + + Args: + relation_id: the identifier for a particular relation. + + Returns: + the relation alias or None if the relation was not found. + """ + for relation in self.charm.model.relations[self.relation_data.relation_name]: + if relation.id == relation_id: + return relation.data[self.relation_data.local_unit].get("alias") + return None + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the database relation is created.""" + super()._on_relation_created_event(event) + + # If relations aliases were provided, assign one to the relation. + self._assign_relation_alias(event.relation.id) + + # Sets both database and extra user roles in the relation + # if the roles are provided. Otherwise, sets only the database. + if not self.relation_data.local_unit.is_leader(): + return + + event_data = {"database": self.relation_data.database} + + if self.relation_data.extra_user_roles: + event_data["extra-user-roles"] = self.relation_data.extra_user_roles + + # set external-node-connectivity field + if self.relation_data.external_node_connectivity: + event_data["external-node-connectivity"] = "true" + + self.relation_data.update_relation_data(event.relation.id, event_data) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the database relation has changed.""" + is_subordinate = False + remote_unit_data = None + for key in event.relation.data.keys(): + if isinstance(key, Unit) and not key.name.startswith(self.charm.app.name): + remote_unit_data = event.relation.data[key] + elif isinstance(key, Application) and key.name != self.charm.app.name: + is_subordinate = event.relation.data[key].get("subordinated") == "true" + + if is_subordinate: + if not remote_unit_data: + return + + if remote_unit_data.get("state") != "ready": + return + + # Check which data has changed to emit customs events. 
+ diff = self._diff(event) + + # Register all new secrets with their labels + if any(newval for newval in diff.added if self.relation_data._is_secret_field(newval)): + self.relation_data._register_secrets_to_relation(event.relation, diff.added) + + # Check if the database is created + # (the database charm shared the credentials). + secret_field_user = self.relation_data._generate_secret_field_name(SECRET_GROUPS.USER) + if ( + "username" in diff.added and "password" in diff.added + ) or secret_field_user in diff.added: + # Emit the default event (the one without an alias). + logger.info("database created at %s", datetime.now()) + getattr(self.on, "database_created").emit( + event.relation, app=event.app, unit=event.unit + ) + + # Emit the aliased event (if any). + self._emit_aliased_event(event, "database_created") + + # To avoid unnecessary application restarts do not trigger + # “endpoints_changed“ event if “database_created“ is triggered. + return + + # Emit an endpoints changed event if the database + # added or changed this info in the relation databag. + if "endpoints" in diff.added or "endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("endpoints changed on %s", datetime.now()) + getattr(self.on, "endpoints_changed").emit( + event.relation, app=event.app, unit=event.unit + ) + + # Emit the aliased event (if any). + self._emit_aliased_event(event, "endpoints_changed") + + # To avoid unnecessary application restarts do not trigger + # “read_only_endpoints_changed“ event if “endpoints_changed“ is triggered. + return + + # Emit a read only endpoints changed event if the database + # added or changed this info in the relation databag. + if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("read-only-endpoints changed on %s", datetime.now()) + getattr(self.on, "read_only_endpoints_changed").emit( + event.relation, app=event.app, unit=event.unit + ) + + # Emit the aliased event (if any). 
+ self._emit_aliased_event(event, "read_only_endpoints_changed") + + +class DatabaseRequires(DatabaseRequirerData, DatabaseRequirerEventHandlers): + """Provider-side of the database relations.""" + + def __init__( + self, + charm: CharmBase, + relation_name: str, + database_name: str, + extra_user_roles: Optional[str] = None, + relations_aliases: Optional[List[str]] = None, + additional_secret_fields: Optional[List[str]] = [], + external_node_connectivity: bool = False, + ): + DatabaseRequirerData.__init__( + self, + charm.model, + relation_name, + database_name, + extra_user_roles, + relations_aliases, + additional_secret_fields, + external_node_connectivity, + ) + DatabaseRequirerEventHandlers.__init__(self, charm, self) + + +################################################################################ +# Charm-specific Relations Data and Events +################################################################################ + +# Kafka Events + + +class KafkaProvidesEvent(RelationEvent): + """Base class for Kafka events.""" + + @property + def topic(self) -> Optional[str]: + """Returns the topic that was requested.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("topic") + + @property + def consumer_group_prefix(self) -> Optional[str]: + """Returns the consumer-group-prefix that was requested.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("consumer-group-prefix") + + +class TopicRequestedEvent(KafkaProvidesEvent, ExtraRoleEvent): + """Event emitted when a new topic is requested for use on this relation.""" + + +class KafkaProvidesEvents(CharmEvents): + """Kafka events. + + This class defines the events that the Kafka can emit. + """ + + topic_requested = EventSource(TopicRequestedEvent) + + +class KafkaRequiresEvent(RelationEvent): + """Base class for Kafka events.""" + + @property + def topic(self) -> Optional[str]: + """Returns the topic.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("topic") + + @property + def bootstrap_server(self) -> Optional[str]: + """Returns a comma-separated list of broker uris.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("endpoints") + + @property + def consumer_group_prefix(self) -> Optional[str]: + """Returns the consumer-group-prefix.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("consumer-group-prefix") + + @property + def zookeeper_uris(self) -> Optional[str]: + """Returns a comma separated list of Zookeeper uris.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("zookeeper-uris") + + +class TopicCreatedEvent(AuthenticationEvent, KafkaRequiresEvent): + """Event emitted when a new topic is created for use on this relation.""" + + +class BootstrapServerChangedEvent(AuthenticationEvent, KafkaRequiresEvent): + """Event emitted when the bootstrap server is changed.""" + + +class KafkaRequiresEvents(CharmEvents): + """Kafka events. + + This class defines the events that the Kafka can emit. 
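+
+    Example of a requirer charm consuming them (an illustrative sketch; the
+    relation name "kafka" and the topic are placeholders):
+
+    ```python
+    from charms.data_platform_libs.v0.data_interfaces import KafkaRequires, TopicCreatedEvent
+
+    # In the requirer charm's __init__:
+    self.kafka = KafkaRequires(self, relation_name="kafka", topic="example-topic")
+    self.framework.observe(self.kafka.on.topic_created, self._on_topic_created)
+
+    def _on_topic_created(self, event: TopicCreatedEvent) -> None:
+        username = event.username
+        password = event.password
+        bootstrap_servers = event.bootstrap_server
+    ```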
+ """ + + topic_created = EventSource(TopicCreatedEvent) + bootstrap_server_changed = EventSource(BootstrapServerChangedEvent) + + +# Kafka Provides and Requires + + +class KafkaProviderData(ProviderData): + """Provider-side of the Kafka relation.""" + + def __init__(self, model: Model, relation_name: str) -> None: + super().__init__(model, relation_name) + + def set_topic(self, relation_id: int, topic: str) -> None: + """Set topic name in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + topic: the topic name. + """ + self.update_relation_data(relation_id, {"topic": topic}) + + def set_bootstrap_server(self, relation_id: int, bootstrap_server: str) -> None: + """Set the bootstrap server in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + bootstrap_server: the bootstrap server address. + """ + self.update_relation_data(relation_id, {"endpoints": bootstrap_server}) + + def set_consumer_group_prefix(self, relation_id: int, consumer_group_prefix: str) -> None: + """Set the consumer group prefix in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + consumer_group_prefix: the consumer group prefix string. + """ + self.update_relation_data(relation_id, {"consumer-group-prefix": consumer_group_prefix}) + + def set_zookeeper_uris(self, relation_id: int, zookeeper_uris: str) -> None: + """Set the zookeeper uris in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + zookeeper_uris: comma-separated list of ZooKeeper server uris. + """ + self.update_relation_data(relation_id, {"zookeeper-uris": zookeeper_uris}) + + +class KafkaProviderEventHandlers(EventHandlers): + """Provider-side of the Kafka relation.""" + + on = KafkaProvidesEvents() # pyright: ignore [reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: KafkaProviderData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Leader only + if not self.relation_data.local_unit.is_leader(): + return + + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Emit a topic requested event if the setup key (topic name and optional + # extra user roles) was added to the relation databag by the application. 
+ if "topic" in diff.added: + getattr(self.on, "topic_requested").emit( + event.relation, app=event.app, unit=event.unit + ) + + +class KafkaProvides(KafkaProviderData, KafkaProviderEventHandlers): + """Provider-side of the Kafka relation.""" + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + KafkaProviderData.__init__(self, charm.model, relation_name) + KafkaProviderEventHandlers.__init__(self, charm, self) + + +class KafkaRequirerData(RequirerData): + """Requirer-side of the Kafka relation.""" + + def __init__( + self, + model: Model, + relation_name: str, + topic: str, + extra_user_roles: Optional[str] = None, + consumer_group_prefix: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ): + """Manager of Kafka client relations.""" + super().__init__(model, relation_name, extra_user_roles, additional_secret_fields) + self.topic = topic + self.consumer_group_prefix = consumer_group_prefix or "" + + @property + def topic(self): + """Topic to use in Kafka.""" + return self._topic + + @topic.setter + def topic(self, value): + # Avoid wildcards + if value == "*": + raise ValueError(f"Error on topic '{value}', cannot be a wildcard.") + self._topic = value + + +class KafkaRequirerEventHandlers(RequirerEventHandlers): + """Requires-side of the Kafka relation.""" + + on = KafkaRequiresEvents() # pyright: ignore [reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: KafkaRequirerData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the Kafka relation is created.""" + super()._on_relation_created_event(event) + + if not self.relation_data.local_unit.is_leader(): + return + + # Sets topic, extra user roles, and "consumer-group-prefix" in the relation + relation_data = {"topic": self.relation_data.topic} + + if self.relation_data.extra_user_roles: + relation_data["extra-user-roles"] = self.relation_data.extra_user_roles + + if self.relation_data.consumer_group_prefix: + relation_data["consumer-group-prefix"] = self.relation_data.consumer_group_prefix + + self.relation_data.update_relation_data(event.relation.id, relation_data) + + def _on_secret_changed_event(self, event: SecretChangedEvent): + """Event notifying about a new value of a secret.""" + pass + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the Kafka relation has changed.""" + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Check if the topic is created + # (the Kafka charm shared the credentials). + + # Register all new secrets with their labels + if any(newval for newval in diff.added if self.relation_data._is_secret_field(newval)): + self.relation_data._register_secrets_to_relation(event.relation, diff.added) + + secret_field_user = self.relation_data._generate_secret_field_name(SECRET_GROUPS.USER) + if ( + "username" in diff.added and "password" in diff.added + ) or secret_field_user in diff.added: + # Emit the default event (the one without an alias). + logger.info("topic created at %s", datetime.now()) + getattr(self.on, "topic_created").emit(event.relation, app=event.app, unit=event.unit) + + # To avoid unnecessary application restarts do not trigger + # “endpoints_changed“ event if “topic_created“ is triggered. 
+ return + + # Emit an endpoints (bootstrap-server) changed event if the Kafka endpoints + # added or changed this info in the relation databag. + if "endpoints" in diff.added or "endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("endpoints changed on %s", datetime.now()) + getattr(self.on, "bootstrap_server_changed").emit( + event.relation, app=event.app, unit=event.unit + ) # here check if this is the right design + return + + +class KafkaRequires(KafkaRequirerData, KafkaRequirerEventHandlers): + """Provider-side of the Kafka relation.""" + + def __init__( + self, + charm: CharmBase, + relation_name: str, + topic: str, + extra_user_roles: Optional[str] = None, + consumer_group_prefix: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ) -> None: + KafkaRequirerData.__init__( + self, + charm.model, + relation_name, + topic, + extra_user_roles, + consumer_group_prefix, + additional_secret_fields, + ) + KafkaRequirerEventHandlers.__init__(self, charm, self) + + +# Opensearch related events + + +class OpenSearchProvidesEvent(RelationEvent): + """Base class for OpenSearch events.""" + + @property + def index(self) -> Optional[str]: + """Returns the index that was requested.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("index") + + +class IndexRequestedEvent(OpenSearchProvidesEvent, ExtraRoleEvent): + """Event emitted when a new index is requested for use on this relation.""" + + +class OpenSearchProvidesEvents(CharmEvents): + """OpenSearch events. + + This class defines the events that OpenSearch can emit. + """ + + index_requested = EventSource(IndexRequestedEvent) + + +class OpenSearchRequiresEvent(DatabaseRequiresEvent): + """Base class for OpenSearch requirer events.""" + + +class IndexCreatedEvent(AuthenticationEvent, OpenSearchRequiresEvent): + """Event emitted when a new index is created for use on this relation.""" + + +class OpenSearchRequiresEvents(CharmEvents): + """OpenSearch events. + + This class defines the events that the opensearch requirer can emit. + """ + + index_created = EventSource(IndexCreatedEvent) + endpoints_changed = EventSource(DatabaseEndpointsChangedEvent) + authentication_updated = EventSource(AuthenticationEvent) + + +# OpenSearch Provides and Requires Objects + + +class OpenSearchProvidesData(ProviderData): + """Provider-side of the OpenSearch relation.""" + + def __init__(self, model: Model, relation_name: str) -> None: + super().__init__(model, relation_name) + + def set_index(self, relation_id: int, index: str) -> None: + """Set the index in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + index: the index as it is _created_ on the provider charm. This needn't match the + requested index, and can be used to present a different index name if, for example, + the requested index is invalid. + """ + self.update_relation_data(relation_id, {"index": index}) + + def set_endpoints(self, relation_id: int, endpoints: str) -> None: + """Set the endpoints in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + endpoints: the endpoint addresses for opensearch nodes. + """ + self.update_relation_data(relation_id, {"endpoints": endpoints}) + + def set_version(self, relation_id: int, version: str) -> None: + """Set the opensearch version in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. 
+ version: database version. + """ + self.update_relation_data(relation_id, {"version": version}) + + +class OpenSearchProvidesEventHandlers(EventHandlers): + """Provider-side of the OpenSearch relation.""" + + on = OpenSearchProvidesEvents() # pyright: ignore[reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: OpenSearchProvidesData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Leader only + if not self.relation_data.local_unit.is_leader(): + return + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Emit an index requested event if the setup key (index name and optional extra user roles) + # have been added to the relation databag by the application. + if "index" in diff.added: + getattr(self.on, "index_requested").emit( + event.relation, app=event.app, unit=event.unit + ) + + +class OpenSearchProvides(OpenSearchProvidesData, OpenSearchProvidesEventHandlers): + """Provider-side of the OpenSearch relation.""" + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + OpenSearchProvidesData.__init__(self, charm.model, relation_name) + OpenSearchProvidesEventHandlers.__init__(self, charm, self) + + +class OpenSearchRequiresData(RequirerData): + """Requires data side of the OpenSearch relation.""" + + def __init__( + self, + model: Model, + relation_name: str, + index: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ): + """Manager of OpenSearch client relations.""" + super().__init__(model, relation_name, extra_user_roles, additional_secret_fields) + self.index = index + + +class OpenSearchRequiresEventHandlers(RequirerEventHandlers): + """Requires events side of the OpenSearch relation.""" + + on = OpenSearchRequiresEvents() # pyright: ignore[reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: OpenSearchRequiresData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the OpenSearch relation is created.""" + super()._on_relation_created_event(event) + + if not self.relation_data.local_unit.is_leader(): + return + + # Sets both index and extra user roles in the relation if the roles are provided. + # Otherwise, sets only the index. 
+ data = {"index": self.relation_data.index} + if self.relation_data.extra_user_roles: + data["extra-user-roles"] = self.relation_data.extra_user_roles + + self.relation_data.update_relation_data(event.relation.id, data) + + def _on_secret_changed_event(self, event: SecretChangedEvent): + """Event notifying about a new value of a secret.""" + if not event.secret.label: + return + + relation = self.relation_data._relation_from_secret_label(event.secret.label) + if not relation: + logging.info( + f"Received secret {event.secret.label} but couldn't parse, seems irrelevant" + ) + return + + if relation.app == self.charm.app: + logging.info("Secret changed event ignored for Secret Owner") + + remote_unit = None + for unit in relation.units: + if unit.app != self.charm.app: + remote_unit = unit + + logger.info("authentication updated") + getattr(self.on, "authentication_updated").emit( + relation, app=relation.app, unit=remote_unit + ) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the OpenSearch relation has changed. + + This event triggers individual custom events depending on the changing relation. + """ + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Register all new secrets with their labels + if any(newval for newval in diff.added if self.relation_data._is_secret_field(newval)): + self.relation_data._register_secrets_to_relation(event.relation, diff.added) + + secret_field_user = self.relation_data._generate_secret_field_name(SECRET_GROUPS.USER) + secret_field_tls = self.relation_data._generate_secret_field_name(SECRET_GROUPS.TLS) + updates = {"username", "password", "tls", "tls-ca", secret_field_user, secret_field_tls} + if len(set(diff._asdict().keys()) - updates) < len(diff): + logger.info("authentication updated at: %s", datetime.now()) + getattr(self.on, "authentication_updated").emit( + event.relation, app=event.app, unit=event.unit + ) + + # Check if the index is created + # (the OpenSearch charm shares the credentials). + if ( + "username" in diff.added and "password" in diff.added + ) or secret_field_user in diff.added: + # Emit the default event (the one without an alias). + logger.info("index created at: %s", datetime.now()) + getattr(self.on, "index_created").emit(event.relation, app=event.app, unit=event.unit) + + # To avoid unnecessary application restarts do not trigger + # “endpoints_changed“ event if “index_created“ is triggered. + return + + # Emit a endpoints changed event if the OpenSearch application added or changed this info + # in the relation databag. + if "endpoints" in diff.added or "endpoints" in diff.changed: + # Emit the default event (the one without an alias). 
+ logger.info("endpoints changed on %s", datetime.now()) + getattr(self.on, "endpoints_changed").emit( + event.relation, app=event.app, unit=event.unit + ) # here check if this is the right design + return + + +class OpenSearchRequires(OpenSearchRequiresData, OpenSearchRequiresEventHandlers): + """Requires-side of the OpenSearch relation.""" + + def __init__( + self, + charm: CharmBase, + relation_name: str, + index: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ) -> None: + OpenSearchRequiresData.__init__( + self, + charm.model, + relation_name, + index, + extra_user_roles, + additional_secret_fields, + ) + OpenSearchRequiresEventHandlers.__init__(self, charm, self) diff --git a/lib/charms/operator_libs_linux/v1/systemd.py b/lib/charms/operator_libs_linux/v1/systemd.py new file mode 100644 index 0000000..cdcbad6 --- /dev/null +++ b/lib/charms/operator_libs_linux/v1/systemd.py @@ -0,0 +1,288 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Abstractions for stopping, starting and managing system services via systemd. + +This library assumes that your charm is running on a platform that uses systemd. E.g., +Centos 7 or later, Ubuntu Xenial (16.04) or later. + +For the most part, we transparently provide an interface to a commonly used selection of +systemd commands, with a few shortcuts baked in. For example, service_pause and +service_resume with run the mask/unmask and enable/disable invocations. + +Example usage: + +```python +from charms.operator_libs_linux.v0.systemd import service_running, service_reload + +# Start a service +if not service_running("mysql"): + success = service_start("mysql") + +# Attempt to reload a service, restarting if necessary +success = service_reload("nginx", restart_on_failure=True) +``` +""" + +__all__ = [ # Don't export `_systemctl`. (It's not the intended way of using this lib.) + "SystemdError", + "daemon_reload", + "service_disable", + "service_enable", + "service_failed", + "service_pause", + "service_reload", + "service_restart", + "service_resume", + "service_running", + "service_start", + "service_stop", +] + +import logging +import subprocess + +logger = logging.getLogger(__name__) + +# The unique Charmhub library identifier, never change it +LIBID = "045b0d179f6b4514a8bb9b48aee9ebaf" + +# Increment this major API version when introducing breaking changes +LIBAPI = 1 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 4 + + +class SystemdError(Exception): + """Custom exception for SystemD related errors.""" + + +def _systemctl(*args: str, check: bool = False) -> int: + """Control a system service using systemctl. + + Args: + *args: Arguments to pass to systemctl. + check: Check the output of the systemctl command. Default: False. + + Returns: + Returncode of systemctl command execution. 
+ + Raises: + SystemdError: Raised if calling systemctl returns a non-zero returncode and check is True. + """ + cmd = ["systemctl", *args] + logger.debug(f"Executing command: {cmd}") + try: + proc = subprocess.run( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + bufsize=1, + encoding="utf-8", + check=check, + ) + logger.debug( + f"Command {cmd} exit code: {proc.returncode}. systemctl output:\n{proc.stdout}" + ) + return proc.returncode + except subprocess.CalledProcessError as e: + raise SystemdError( + f"Command {cmd} failed with returncode {e.returncode}. systemctl output:\n{e.stdout}" + ) + + +def service_running(service_name: str) -> bool: + """Report whether a system service is running. + + Args: + service_name: The name of the service to check. + + Return: + True if service is running/active; False if not. + """ + # If returncode is 0, this means that is service is active. + return _systemctl("--quiet", "is-active", service_name) == 0 + + +def service_failed(service_name: str) -> bool: + """Report whether a system service has failed. + + Args: + service_name: The name of the service to check. + + Returns: + True if service is marked as failed; False if not. + """ + # If returncode is 0, this means that the service has failed. + return _systemctl("--quiet", "is-failed", service_name) == 0 + + +def service_start(*args: str) -> bool: + """Start a system service. + + Args: + *args: Arguments to pass to `systemctl start` (normally the service name). + + Returns: + On success, this function returns True for historical reasons. + + Raises: + SystemdError: Raised if `systemctl start ...` returns a non-zero returncode. + """ + return _systemctl("start", *args, check=True) == 0 + + +def service_stop(*args: str) -> bool: + """Stop a system service. + + Args: + *args: Arguments to pass to `systemctl stop` (normally the service name). + + Returns: + On success, this function returns True for historical reasons. + + Raises: + SystemdError: Raised if `systemctl stop ...` returns a non-zero returncode. + """ + return _systemctl("stop", *args, check=True) == 0 + + +def service_restart(*args: str) -> bool: + """Restart a system service. + + Args: + *args: Arguments to pass to `systemctl restart` (normally the service name). + + Returns: + On success, this function returns True for historical reasons. + + Raises: + SystemdError: Raised if `systemctl restart ...` returns a non-zero returncode. + """ + return _systemctl("restart", *args, check=True) == 0 + + +def service_enable(*args: str) -> bool: + """Enable a system service. + + Args: + *args: Arguments to pass to `systemctl enable` (normally the service name). + + Returns: + On success, this function returns True for historical reasons. + + Raises: + SystemdError: Raised if `systemctl enable ...` returns a non-zero returncode. + """ + return _systemctl("enable", *args, check=True) == 0 + + +def service_disable(*args: str) -> bool: + """Disable a system service. + + Args: + *args: Arguments to pass to `systemctl disable` (normally the service name). + + Returns: + On success, this function returns True for historical reasons. + + Raises: + SystemdError: Raised if `systemctl disable ...` returns a non-zero returncode. + """ + return _systemctl("disable", *args, check=True) == 0 + + +def service_reload(service_name: str, restart_on_failure: bool = False) -> bool: + """Reload a system service, optionally falling back to restart if reload fails. + + Args: + service_name: The name of the service to reload. 
+ restart_on_failure: + Boolean indicating whether to fall back to a restart if the reload fails. + + Returns: + On success, this function returns True for historical reasons. + + Raises: + SystemdError: Raised if `systemctl reload|restart ...` returns a non-zero returncode. + """ + try: + return _systemctl("reload", service_name, check=True) == 0 + except SystemdError: + if restart_on_failure: + return service_restart(service_name) + else: + raise + + +def service_pause(service_name: str) -> bool: + """Pause a system service. + + Stops the service and prevents the service from starting again at boot. + + Args: + service_name: The name of the service to pause. + + Returns: + On success, this function returns True for historical reasons. + + Raises: + SystemdError: Raised if service is still running after being paused by systemctl. + """ + _systemctl("disable", "--now", service_name) + _systemctl("mask", service_name) + + if service_running(service_name): + raise SystemdError(f"Attempted to pause {service_name!r}, but it is still running.") + + return True + + +def service_resume(service_name: str) -> bool: + """Resume a system service. + + Re-enable starting the service again at boot. Start the service. + + Args: + service_name: The name of the service to resume. + + Returns: + On success, this function returns True for historical reasons. + + Raises: + SystemdError: Raised if service is not running after being resumed by systemctl. + """ + _systemctl("unmask", service_name) + _systemctl("enable", "--now", service_name) + + if not service_running(service_name): + raise SystemdError(f"Attempted to resume {service_name!r}, but it is not running.") + + return True + + +def daemon_reload() -> bool: + """Reload systemd manager configuration. + + Returns: + On success, this function returns True for historical reasons. + + Raises: + SystemdError: Raised if `systemctl daemon-reload` returns a non-zero returncode. + """ + return _systemctl("daemon-reload", check=True) == 0 diff --git a/metadata.yaml b/metadata.yaml index 437dea8..92bdcd2 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -21,3 +21,7 @@ assumes: provides: irc-bridge: interface: irc_bridge +requires: + database: + interface: postgresql_client + limit: 1 diff --git a/requirements.txt b/requirements.txt index a46b0b5..1e6d29c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ ops==2.16.1 -pydantic==2.7.3 +pydantic==2.8.2 \ No newline at end of file diff --git a/src-docs/charm.py.md b/src-docs/charm.py.md new file mode 100644 index 0000000..4bdeb1e --- /dev/null +++ b/src-docs/charm.py.md @@ -0,0 +1,92 @@ + + + + +# module `charm.py` +Charm for irc-bridge. + +**Global Variables** +--------------- +- **DATABASE_RELATION_NAME** +- **MATRIX_RELATION_NAME** + + +--- + +## class `IRCCharm` +Charm the irc bridge service. + + + +### function `__init__` + +```python +__init__(*args: Any) +``` + +Construct. + + + +**Args:** + + - `args`: Arguments passed to the CharmBase parent constructor. + + +--- + +#### property app + +Application that this unit is part of. + +--- + +#### property charm_dir + +Root directory of the charm as it is running. + +--- + +#### property config + +A mapping containing the charm's config and current values. + +--- + +#### property meta + +Metadata of this charm. + +--- + +#### property model + +Shortcut for more simple access the model. + +--- + +#### property unit + +Unit that this execution is responsible for. 
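For orientation, the constructor and hook handlers summarized above follow a holistic reconcile pattern: every observed lifecycle event funnels into `reconcile()`. A condensed, illustrative sketch of that wiring is below; the authoritative code is `src/charm.py` later in this patch.

```python
import typing

import ops

from constants import DATABASE_RELATION_NAME, MATRIX_RELATION_NAME
from database_observer import DatabaseObserver
from irc import IRCBridgeService
from matrix_observer import MatrixObserver


class IRCCharm(ops.CharmBase):
    """Charm the irc bridge service."""

    def __init__(self, *args: typing.Any):
        super().__init__(*args)
        self._irc = IRCBridgeService()
        self._database = DatabaseObserver(self, DATABASE_RELATION_NAME)
        self._matrix = MatrixObserver(self, MATRIX_RELATION_NAME)
        # Every observed hook funnels into the same reconcile path.
        self.framework.observe(self.on.config_changed, self._on_config_changed)
        self.framework.observe(self.on.install, self._on_install)

    def _on_config_changed(self, _: ops.ConfigChangedEvent) -> None:
        self.reconcile()

    def _on_install(self, _: ops.InstallEvent) -> None:
        self.reconcile()

    def reconcile(self) -> None:
        """Gather relation data and configuration, then (re)start the bridge."""
```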
+ + + +--- + + + +### function `reconcile` + +```python +reconcile() → None +``` + +Reconcile the charm. + +This is a more simple approach to reconciliation, adapted from Charming Complexity sans state and observers. + +Being a simple charm, we don't need to do much here. + +Ensure we have a database relation, ensure we have a relation to matrix, populate database connection string and matrix homeserver URL in the config template and (re)start the service. + + diff --git a/src-docs/charm_types.py.md b/src-docs/charm_types.py.md new file mode 100644 index 0000000..9719956 --- /dev/null +++ b/src-docs/charm_types.py.md @@ -0,0 +1,179 @@ + + + + +# module `charm_types.py` +Type definitions for the Synapse charm. + + + +--- + +## class `CharmConfig` +A named tuple representing an IRC configuration. + + + +**Attributes:** + + - `ident_enabled`: Whether IRC ident is enabled. + - `bot_nickname`: Bot nickname. + - `bridge_admins`: Bridge admins. + + +--- + +#### property model_extra + +Get extra fields set during validation. + + + +**Returns:** + A dictionary of extra fields, or `None` if `config.extra` is not set to `"allow"`. + +--- + +#### property model_fields_set + +Returns the set of fields that have been explicitly set on this model instance. + + + +**Returns:** + A set of strings representing the fields that have been set, i.e. that were not filled from defaults. + + + +--- + + + +### classmethod `userids_to_list` + +```python +userids_to_list(value: str) → List[str] +``` + +Convert a comma separated list of users to list. + + + +**Args:** + + - `value`: the input value. + + + +**Returns:** + The string converted to list. + + + +**Raises:** + + - `ValidationError`: if user_id is not as expected. + + +--- + +## class `DatasourceMatrix` +A named tuple representing a Datasource Matrix. + + + +**Attributes:** + + - `host`: Host (IP or DNS without port or protocol). + + +--- + +#### property model_extra + +Get extra fields set during validation. + + + +**Returns:** + A dictionary of extra fields, or `None` if `config.extra` is not set to `"allow"`. + +--- + +#### property model_fields_set + +Returns the set of fields that have been explicitly set on this model instance. + + + +**Returns:** + A set of strings representing the fields that have been set, i.e. that were not filled from defaults. + + + + +--- + +## class `DatasourcePostgreSQL` +A named tuple representing a Datasource PostgreSQL. + + + +**Attributes:** + + - `user`: User. + - `password`: Password. + - `host`: Host (IP or DNS without port or protocol). + - `port`: Port. + - `db`: Database name. + - `uri`: Database connection URI. + + +--- + +#### property model_extra + +Get extra fields set during validation. + + + +**Returns:** + A dictionary of extra fields, or `None` if `config.extra` is not set to `"allow"`. + +--- + +#### property model_fields_set + +Returns the set of fields that have been explicitly set on this model instance. + + + +**Returns:** + A set of strings representing the fields that have been set, i.e. that were not filled from defaults. + + + +--- + + + +### classmethod `from_relation` + +```python +from_relation(relation: Relation) → DatasourcePostgreSQL +``` + +Create a DatasourcePostgreSQL from a relation. + + + +**Args:** + + - `relation`: The relation to get the data from. + + + +**Returns:** + A DatasourcePostgreSQL instance. 
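As a usage sketch (all values here are made up; the real parsing lives in `src/charm_types.py` further down): `from_relation` splits the `endpoints` entry of the `postgresql_client` databag into host and port and assembles a `postgres://` URI, while the `bridge_admins` validator splits the comma separated config value and prefixes each entry with `@`.

```python
from charm_types import CharmConfig, DatasourcePostgreSQL

# Hypothetical relation databag as published by a postgresql_client provider.
relation_data = {
    "username": "user1",
    "password": "s3cret",  # placeholder value
    "endpoints": "postgresql.local:5432",
    "database": "ircbridge",
}
host, port = relation_data["endpoints"].split(":")
db = DatasourcePostgreSQL(
    user=relation_data["username"],
    password=relation_data["password"],
    host=host,
    port=port,
    db=relation_data["database"],
    uri=f"postgres://user1:s3cret@{host}:{port}/ircbridge",
)

# bridge_admins follows the "user:domain" form from config.yaml; the validator
# splits on commas and prepends "@", yielding
# ["@admin1:example.com", "@admin2:example.com"].
config = CharmConfig(
    ident_enabled=False,
    bot_nickname="ircbot",
    bridge_admins="admin1:example.com,admin2:example.com",
)
```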
+ + diff --git a/src-docs/constants.py.md b/src-docs/constants.py.md new file mode 100644 index 0000000..7537950 --- /dev/null +++ b/src-docs/constants.py.md @@ -0,0 +1,19 @@ + + + + +# module `constants.py` +File containing constants to be used in the charm. + +**Global Variables** +--------------- +- **IRC_BRIDGE_HEALTH_PORT** +- **IRC_BRIDGE_KEY_ALGO** +- **IRC_BRIDGE_KEY_OPTS** +- **DATABASE_NAME** +- **DATABASE_RELATION_NAME** +- **MATRIX_RELATION_NAME** +- **IRC_BRIDGE_SNAP_NAME** +- **SNAP_PACKAGES** + + diff --git a/src-docs/database_observer.py.md b/src-docs/database_observer.py.md new file mode 100644 index 0000000..9a3d8fb --- /dev/null +++ b/src-docs/database_observer.py.md @@ -0,0 +1,69 @@ + + + + +# module `database_observer.py` +Provide the DatabaseObserver class to handle database relation and state. + +**Global Variables** +--------------- +- **DATABASE_NAME** + + +--- + +## class `DatabaseObserver` +The Database relation observer. + + + +**Attributes:** + + - `relation_name`: The name of the relation to observe. + - `database`: The database relation interface. + + + +### function `__init__` + +```python +__init__(charm: CharmBase, relation_name: str) +``` + +Initialize the oserver and register event handlers. + + + +**Args:** + + - `charm`: The parent charm to attach the observer to. + - `relation_name`: The name of the relation to observe. + + +--- + +#### property model + +Shortcut for more simple access the model. + + + +--- + + + +### function `get_db` + +```python +get_db() → Optional[DatasourcePostgreSQL] +``` + +Return a postgresql datasource model. + + + +**Returns:** + + - `DatasourcePostgreSQL`: The datasource model. + + diff --git a/src-docs/exceptions.py.md b/src-docs/exceptions.py.md new file mode 100644 index 0000000..7789192 --- /dev/null +++ b/src-docs/exceptions.py.md @@ -0,0 +1,90 @@ + + + + +# module `exceptions.py` +Exceptions used by the irc-bridge charm. + + + +--- + +## class `RelationDataError` +Exception raised when we don't have the expected data in the relation or no relation. + +Attrs: msg (str): Explanation of the error. + + + +### function `__init__` + +```python +__init__(msg: str) +``` + +Initialize a new instance of the RelationDataError exception. + + + +**Args:** + + - `msg` (str): Explanation of the error. + + + + + +--- + +## class `SnapError` +Exception raised when an action on the snap fails. + +Attrs: msg (str): Explanation of the error. + + + +### function `__init__` + +```python +__init__(msg: str) +``` + +Initialize a new instance of the SnapError exception. + + + +**Args:** + + - `msg` (str): Explanation of the error. + + + + + +--- + +## class `SystemdError` +Exception raised when an action on the systemd service fails. + +Attrs: msg (str): Explanation of the error. + + + +### function `__init__` + +```python +__init__(msg: str) +``` + +Initialize a new instance of the SystemdError exception. + + + +**Args:** + + - `msg` (str): Explanation of the error. + + + + + diff --git a/src-docs/irc.py.md b/src-docs/irc.py.md new file mode 100644 index 0000000..a201c85 --- /dev/null +++ b/src-docs/irc.py.md @@ -0,0 +1,186 @@ + + + + +# module `irc.py` +IRC Bridge charm business logic. + +**Global Variables** +--------------- +- **IRC_BRIDGE_HEALTH_PORT** +- **IRC_BRIDGE_KEY_ALGO** +- **IRC_BRIDGE_KEY_OPTS** +- **IRC_BRIDGE_SNAP_NAME** +- **SNAP_PACKAGES** + + +--- + +## class `IRCBridgeService` +IRC Bridge service class. + +This class provides the necessary methods to manage the matrix-appservice-irc service. 
The service requires a connection to a (PostgreSQL) database and to a Matrix homeserver. Both of these will be part of the configuration file created by this class. Once the configuration file is created, a PEM file will be generated and an app registration file. The app registration file will be used to register the bridge with the Matrix homeserver. PEM and the configuration file will be used by the matrix-appservice-irc service. + + + + +--- + + + +### function `configure` + +```python +configure( + db: DatasourcePostgreSQL, + matrix: DatasourceMatrix, + config: CharmConfig +) → None +``` + +Configure the service. + + + +**Args:** + + - `db`: the database configuration + - `matrix`: the matrix configuration + - `config`: the charm configuration + +--- + + + +### function `prepare` + +```python +prepare() → None +``` + +Prepare the machine. + +Install the snap package and create the configuration directory and file. + +--- + + + +### function `reconcile` + +```python +reconcile( + db: DatasourcePostgreSQL, + matrix: DatasourceMatrix, + config: CharmConfig +) → None +``` + +Reconcile the service. + +Simple flow: +- Check if the snap is installed +- Check if the configuration files exist +- Check if the service is running + + + +**Args:** + + - `db`: the database configuration + - `matrix`: the matrix configuration + - `config`: the charm configuration + +--- + + + +### function `reload` + +```python +reload() → None +``` + +Reload the matrix-appservice-irc service. + +Check if the service is running and reload it. + + + +**Raises:** + + - `ReloadError`: when encountering a SnapError + +--- + + + +### function `start` + +```python +start() → None +``` + +Start the matrix-appservice-irc service. + + + +**Raises:** + + - `StartError`: when encountering a SnapError + +--- + + + +### function `stop` + +```python +stop() → None +``` + +Stop the matrix-appservice-irc service. + + + +**Raises:** + + - `StopError`: when encountering a SnapError + + +--- + +## class `InstallError` +Exception raised when unable to install dependencies for the service. + + + + + +--- + +## class `ReloadError` +Exception raised when unable to reload the service. + + + + + +--- + +## class `StartError` +Exception raised when unable to start the service. + + + + + +--- + +## class `StopError` +Exception raised when unable to stop the service. + + + + + diff --git a/src-docs/matrix_observer.py.md b/src-docs/matrix_observer.py.md new file mode 100644 index 0000000..c0b1ffb --- /dev/null +++ b/src-docs/matrix_observer.py.md @@ -0,0 +1,59 @@ + + + + +# module `matrix_observer.py` +Provide the DatabaseObserver class to handle database relation and state. + + + +--- + +## class `MatrixObserver` +The Matrix relation observer. + + + +### function `__init__` + +```python +__init__(charm: CharmBase, relation_name: str) +``` + +Initialize the oserver and register event handlers. + + + +**Args:** + + - `charm`: The parent charm to attach the observer to. + - `relation_name`: The name of the relation to observe + + +--- + +#### property model + +Shortcut for more simple access the model. + + + +--- + + + +### function `reconcile` + +```python +reconcile() → Optional[DatasourceMatrix] +``` + +Reconcile the database relation. + + + +**Returns:** + + - `Dict`: Information needed for setting environment variables. 
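Pulling the documented pieces together: the database observer yields a `DatasourcePostgreSQL`, the matrix observer a `DatasourceMatrix`, and the juju config a `CharmConfig`; `IRCBridgeService.reconcile` then runs prepare, configure and reload. A simplified sketch under those assumptions follows (the helper name `reconcile_once` is invented for illustration; the real flow is in `src/charm.py` and `src/irc.py` below).

```python
import typing

import ops

from charm_types import CharmConfig, DatasourceMatrix, DatasourcePostgreSQL
from irc import IRCBridgeService


def reconcile_once(
    unit: ops.Unit,
    irc: IRCBridgeService,
    db: typing.Optional[DatasourcePostgreSQL],
    matrix: DatasourceMatrix,
    config: CharmConfig,
) -> None:
    """Simplified view of the charm's reconcile flow."""
    if db is None:
        # Without the postgresql_client relation the bridge cannot be configured.
        unit.status = ops.BlockedStatus("Database relation not found")
        return
    # prepare() installs the snap and seeds the config and systemd unit files,
    # configure() renders the PEM, app registration and config.yaml,
    # reload() reloads matrix-appservice-irc through systemd.
    irc.reconcile(db, matrix, config)
    unit.status = ops.ActiveStatus()
```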
+ + diff --git a/src/charm.py b/src/charm.py index d22f01d..76d45fd 100755 --- a/src/charm.py +++ b/src/charm.py @@ -8,14 +8,19 @@ import typing import ops +from pydantic import ValidationError -from irc import IRCService +from charm_types import CharmConfig +from constants import DATABASE_RELATION_NAME, MATRIX_RELATION_NAME +from database_observer import DatabaseObserver +from irc import IRCBridgeService +from matrix_observer import MatrixObserver logger = logging.getLogger(__name__) class IRCCharm(ops.CharmBase): - """Charm the service.""" + """Charm the irc bridge service.""" def __init__(self, *args: typing.Any): """Construct. @@ -24,36 +29,84 @@ def __init__(self, *args: typing.Any): args: Arguments passed to the CharmBase parent constructor. """ super().__init__(*args) - self.irc = IRCService() + self._irc = IRCBridgeService() + self._database = DatabaseObserver(self, DATABASE_RELATION_NAME) + self._matrix = MatrixObserver(self, MATRIX_RELATION_NAME) self.framework.observe(self.on.config_changed, self._on_config_changed) self.framework.observe(self.on.install, self._on_install) + self.framework.observe(self.on.upgrade_charm, self._on_upgrade_charm) self.framework.observe(self.on.start, self._on_start) self.framework.observe(self.on.stop, self._on_stop) - self.framework.observe(self.on.upgrade_charm, self._on_upgrade_charm) def _on_config_changed(self, _: ops.ConfigChangedEvent) -> None: - """Handle changed configuration.""" - self.unit.status = ops.ActiveStatus() + """Handle config changed.""" + self.reconcile() def _on_install(self, _: ops.InstallEvent) -> None: """Handle install.""" - self.unit.status = ops.MaintenanceStatus("Preparing irc bridge") - self.irc.prepare() - self.unit.status = ops.ActiveStatus() + self.reconcile() + + def _on_upgrade_charm(self, _: ops.UpgradeCharmEvent) -> None: + """Handle upgrade charm.""" + self.reconcile() def _on_start(self, _: ops.StartEvent) -> None: """Handle start.""" - self.irc.start() - self.unit.status = ops.ActiveStatus() + self.reconcile() def _on_stop(self, _: ops.StopEvent) -> None: """Handle stop.""" - self.irc.stop() + self.unit.status = ops.MaintenanceStatus("Stopping charm") + self._irc.stop() - def _on_upgrade_charm(self, _: ops.UpgradeCharmEvent) -> None: - """Handle upgrade-charm.""" - self.unit.status = ops.MaintenanceStatus("Upgrading dependencies") - self.irc.prepare() + def _charm_config(self) -> CharmConfig: + """Reconcile the charm. + + Returns: + CharmConfig: The reconciled charm configuration. + """ + return CharmConfig( + ident_enabled=self.model.config.get("ident_enabled", None), + bot_nickname=self.model.config.get("bot_nickname", None), + bridge_admins=self.model.config.get("bridge_admins", None), + ) + + def reconcile(self) -> None: + """Reconcile the charm. + + This is a more simple approach to reconciliation, + adapted from Charming Complexity sans state and observers. + + Being a simple charm, we don't need to do much here. + + Ensure we have a database relation, + ensure we have a relation to matrix, + populate database connection string and matrix homeserver URL + in the config template and (re)start the service. 
+        """
+        self.unit.status = ops.MaintenanceStatus("Reconciling charm")
+        try:
+            logger.info("DB Reconciling charm")
+            db = self._database.get_db()
+            if db is None:
+                self.unit.status = ops.BlockedStatus("Database relation not found")
+                return
+        except ValidationError:
+            self.unit.status = ops.MaintenanceStatus(
+                "Database configuration missing username, password or URI"
+            )
+            return
+        logger.info("Matrix Reconciling charm")
+        matrix = self._matrix.reconcile()
+        try:
+            logger.info("Config Reconciling charm")
+            config = self._charm_config()
+        except ValidationError as e:
+            self.unit.status = ops.MaintenanceStatus(f"Invalid configuration: {e}")
+            logger.exception("Invalid configuration: %s", e)
+            return
+        logger.info("IRC Reconciling charm")
+        self._irc.reconcile(db, matrix, config)
         self.unit.status = ops.ActiveStatus()
diff --git a/src/charm_types.py b/src/charm_types.py
new file mode 100644
index 0000000..78e7c71
--- /dev/null
+++ b/src/charm_types.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python3
+
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""Type definitions for the Synapse charm."""
+
+import re
+import typing
+
+import ops
+from pydantic import BaseModel, Field, validator
+
+
+class DatasourcePostgreSQL(BaseModel):
+    """A named tuple representing a Datasource PostgreSQL.
+
+    Attributes:
+        user: User.
+        password: Password.
+        host: Host (IP or DNS without port or protocol).
+        port: Port.
+        db: Database name.
+        uri: Database connection URI.
+    """
+
+    user: str = Field(min_length=1, description="User")
+    password: str = Field(min_length=1, description="Password")
+    host: str = Field(min_length=1, description="Host")
+    port: str = Field(min_length=1, description="Port")
+    db: str = Field(min_length=1, description="Database name")
+    uri: str = Field(min_length=1, description="Database connection URI")
+
+    @classmethod
+    def from_relation(cls, relation: ops.Relation) -> "DatasourcePostgreSQL":
+        """Create a DatasourcePostgreSQL from a relation.
+
+        Args:
+            relation: The relation to get the data from.
+
+        Returns:
+            A DatasourcePostgreSQL instance.
+        """
+        relation_data = relation.data[relation.app]
+        user = relation_data.get("username", "")
+        password = relation_data.get("password", "")
+        host, port = relation_data.get("endpoints", ":").split(":")
+        db = relation_data.get("database", "")
+        uri = f"postgres://{user}:{password}@{host}:{port}/{db}"
+
+        return DatasourcePostgreSQL(
+            user=user,
+            password=password,
+            host=host,
+            port=port,
+            db=db,
+            uri=uri,
+        )
+
+
+class DatasourceMatrix(BaseModel):
+    """A named tuple representing a Datasource Matrix.
+
+    Attributes:
+        host: Host (IP or DNS without port or protocol).
+    """
+
+    host: str
+
+
+class CharmConfig(BaseModel):
+    """A named tuple representing an IRC configuration.
+
+    Attributes:
+        ident_enabled: Whether IRC ident is enabled.
+        bot_nickname: Bot nickname.
+        bridge_admins: Bridge admins.
+    """
+
+    ident_enabled: bool
+    bot_nickname: str
+    bridge_admins: str
+
+    @validator("bridge_admins")
+    @classmethod
+    def userids_to_list(cls, value: str) -> typing.List[str]:
+        """Convert a comma separated list of users to list.
+
+        Args:
+            value: the input value.
+
+        Returns:
+            The string converted to list.
+
+        Raises:
+            ValueError: if user_id is not as expected.
+        """
+        # Based on documentation
+        # https://spec.matrix.org/v1.10/appendices/#user-identifiers
+        userid_regex = r"@[a-z0-9._=/+-]+:\w+\.\w+"
+        if value is None:
+            return []
+        value_list = ["@" + user_id.strip() for user_id in value.split(",")]
+        for user_id in value_list:
+            if not re.fullmatch(userid_regex, user_id):
+                raise ValueError(f"Invalid user ID format: {user_id}")
+        return value_list
diff --git a/src/constants.py b/src/constants.py
index 56fb600..8d57f17 100644
--- a/src/constants.py
+++ b/src/constants.py
@@ -3,6 +3,36 @@
 
 """File containing constants to be used in the charm."""
 
+import pathlib
+
+# App
+IRC_BRIDGE_HEALTH_PORT = 5446
+IRC_BRIDGE_KEY_ALGO = "RSA"
+IRC_BRIDGE_KEY_OPTS = "rsa_keygen_bits:2048"
+
+# Database
+DATABASE_NAME = "ircbridge"
+DATABASE_RELATION_NAME = "database"
+
+# Paths
+IRC_BRIDGE_CONFIG_DIR_PATH = pathlib.Path("/etc/matrix-appservice-irc")
+IRC_BRIDGE_TEMPLATE_DIR_PATH = pathlib.Path("templates")
+SYSTEMD_DIR_PATH = pathlib.Path("/etc/systemd/system")
+IRC_BRIDGE_CONFIG_FILE_PATH = IRC_BRIDGE_CONFIG_DIR_PATH / "config.yaml"
+IRC_BRIDGE_TEMPLATE_CONFIG_FILE_PATH = IRC_BRIDGE_TEMPLATE_DIR_PATH / "config.yaml"
+IRC_BRIDGE_UNIT_FILE_PATH = SYSTEMD_DIR_PATH / "matrix-appservice-irc.service"
+IRC_BRIDGE_TARGET_FILE_PATH = SYSTEMD_DIR_PATH / "matrix-appservice-irc.target"
+IRC_BRIDGE_TEMPLATE_UNIT_FILE_PATH = IRC_BRIDGE_TEMPLATE_DIR_PATH / "matrix-appservice-irc.service"
+IRC_BRIDGE_TEMPLATE_TARGET_FILE_PATH = (
+    IRC_BRIDGE_TEMPLATE_DIR_PATH / "matrix-appservice-irc.target"
+)
+IRC_BRIDGE_PEM_FILE_PATH = IRC_BRIDGE_CONFIG_DIR_PATH / "irc_passkey.pem"
+IRC_BRIDGE_REGISTRATION_FILE_PATH = IRC_BRIDGE_CONFIG_DIR_PATH / "appservice-registration-irc.yaml"
+
+# Charm
+MATRIX_RELATION_NAME = "matrix-plugins"
+
+# Snap
 IRC_BRIDGE_SNAP_NAME = "matrix-appservice-irc"
 SNAP_PACKAGES = {
     IRC_BRIDGE_SNAP_NAME: {"channel": "edge"},
diff --git a/src/database_observer.py b/src/database_observer.py
new file mode 100644
index 0000000..49ad63a
--- /dev/null
+++ b/src/database_observer.py
@@ -0,0 +1,66 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""Provide the DatabaseObserver class to handle database relation and state."""
+
+import typing
+
+from charms.data_platform_libs.v0.data_interfaces import (
+    DatabaseCreatedEvent,
+    DatabaseEndpointsChangedEvent,
+    DatabaseRequires,
+)
+from ops.charm import CharmBase
+from ops.framework import Object
+
+from charm_types import DatasourcePostgreSQL
+from constants import DATABASE_NAME
+
+
+class DatabaseObserver(Object):
+    """The Database relation observer.
+
+    Attributes:
+        relation_name: The name of the relation to observe.
+        database: The database relation interface.
+    """
+
+    def __init__(self, charm: CharmBase, relation_name: str):
+        """Initialize the observer and register event handlers.
+
+        Args:
+            charm: The parent charm to attach the observer to.
+            relation_name: The name of the relation to observe.
+ """ + super().__init__(charm, "database-observer") + self._charm = charm + self.relation_name = relation_name + self.database = DatabaseRequires( + self._charm, + relation_name=self.relation_name, + database_name=DATABASE_NAME, + ) + self.framework.observe(self.database.on.database_created, self._on_database_created) + self.framework.observe(self.database.on.endpoints_changed, self._on_endpoints_changed) + + def _on_database_created(self, _: DatabaseCreatedEvent) -> None: + """Handle database created.""" + self._charm.reconcile() # type: ignore + + def _on_endpoints_changed(self, _: DatabaseEndpointsChangedEvent) -> None: + """Handle endpoints changed.""" + self._charm.reconcile() # type: ignore + + def get_db(self) -> typing.Optional[DatasourcePostgreSQL]: + """Return a postgresql datasource model. + + Returns: + DatasourcePostgreSQL: The datasource model. + """ + # not using get_relation due this issue + # https://github.com/canonical/operator/issues/1153 + if not self.model.relations.get(self.database.relation_name): + return None + + relation = self.model.get_relation(self.relation_name) + return DatasourcePostgreSQL.from_relation(relation) diff --git a/src/exceptions.py b/src/exceptions.py index f65931e..4998eec 100644 --- a/src/exceptions.py +++ b/src/exceptions.py @@ -18,3 +18,51 @@ def __init__(self, msg: str): msg (str): Explanation of the error. """ self.msg = msg + + +class SystemdError(Exception): + """Exception raised when an action on the systemd service fails. + + Attrs: + msg (str): Explanation of the error. + """ + + def __init__(self, msg: str): + """Initialize a new instance of the SystemdError exception. + + Args: + msg (str): Explanation of the error. + """ + self.msg = msg + + +class RelationDataError(Exception): + """Exception raised when we don't have the expected data in the relation or no relation. + + Attrs: + msg (str): Explanation of the error. + """ + + def __init__(self, msg: str): + """Initialize a new instance of the RelationDataError exception. + + Args: + msg (str): Explanation of the error. + """ + self.msg = msg + + +class SynapseConfigurationFileError(Exception): + """Exception raised when we can't parse the synapse configuration file. + + Attrs: + msg (str): Explanation of the error. + """ + + def __init__(self, msg: str): + """Initialize a new instance of the SynapseConfigurationFileError exception. + + Args: + msg (str): Explanation of the error. 
+ """ + self.msg = msg diff --git a/src/irc.py b/src/irc.py index 6eeaf02..135efd2 100644 --- a/src/irc.py +++ b/src/irc.py @@ -4,30 +4,45 @@ """IRC Bridge charm business logic.""" import logging -import os -import pathlib import shutil -import tempfile -import time -import typing +import subprocess # nosec +import yaml +from charms.operator_libs_linux.v1 import systemd from charms.operator_libs_linux.v2 import snap -import constants import exceptions +from charm_types import CharmConfig, DatasourceMatrix, DatasourcePostgreSQL +from constants import ( + IRC_BRIDGE_CONFIG_DIR_PATH, + IRC_BRIDGE_CONFIG_FILE_PATH, + IRC_BRIDGE_HEALTH_PORT, + IRC_BRIDGE_KEY_ALGO, + IRC_BRIDGE_KEY_OPTS, + IRC_BRIDGE_PEM_FILE_PATH, + IRC_BRIDGE_REGISTRATION_FILE_PATH, + IRC_BRIDGE_SNAP_NAME, + IRC_BRIDGE_TARGET_FILE_PATH, + IRC_BRIDGE_TEMPLATE_CONFIG_FILE_PATH, + IRC_BRIDGE_TEMPLATE_TARGET_FILE_PATH, + IRC_BRIDGE_TEMPLATE_UNIT_FILE_PATH, + IRC_BRIDGE_UNIT_FILE_PATH, + SNAP_PACKAGES, + SYSTEMD_DIR_PATH, +) logger = logging.getLogger(__name__) -class ReloadError(exceptions.SnapError): +class ReloadError(exceptions.SystemdError): """Exception raised when unable to reload the service.""" -class StartError(exceptions.SnapError): +class StartError(exceptions.SystemdError): """Exception raised when unable to start the service.""" -class StopError(exceptions.SnapError): +class StopError(exceptions.SystemdError): """Exception raised when unable to stop the service.""" @@ -35,75 +50,58 @@ class InstallError(exceptions.SnapError): """Exception raised when unable to install dependencies for the service.""" -class IRCBRidgeService: +class IRCBridgeService: """IRC Bridge service class. This class provides the necessary methods to manage the matrix-appservice-irc service. The service requires a connection to a (PostgreSQL) database and to a Matrix homeserver. Both of these will be part of the configuration file created by this class. - Once the configuration file is created, a PEM file will be generated and an app registration file. + Once the configuration file is created, a PEM file will be generated and an app + registration file. The app registration file will be used to register the bridge with the Matrix homeserver. PEM and the configuration file will be used by the matrix-appservice-irc service. """ - def reload(self) -> None: - """Reload the matrix-appservice-irc service. - - Raises: - ReloadError: when encountering a SnapError - """ - try: - cache = snap.SnapCache() - charmed_irc_bridge = cache[constants.IRC_BRIDGE_SNAP_NAME] - charmed_irc_bridge.restart(reload=True) - except snap.SnapError as e: - error_msg = ( - f"An exception occurred when reloading {constants.IRC_BRIDGE_SNAP_NAME}. Reason: {e}" - ) - logger.exception(error_msg) - raise ReloadError(error_msg) from e + def reconcile( + self, db: DatasourcePostgreSQL, matrix: DatasourceMatrix, config: CharmConfig + ) -> None: + """Reconcile the service. - def start(self) -> None: - """Start the matrix-appservice-irc service. + Simple flow: + - Check if the snap is installed + - Check if the configuration files exist + - Check if the service is running - Raises: - StartError: when encountering a SnapError + Args: + db: the database configuration + matrix: the matrix configuration + config: the charm configuration """ - try: - cache = snap.SnapCache() - charmed_irc_bridge = cache[constants.IRC_BRIDGE_SNAP_NAME] - charmed_irc_bridge.start() - except snap.SnapError as e: - error_msg = ( - f"An exception occurred when stopping {constants.IRC_BRIDGE_SNAP_NAME}. 
Reason: {e}" - ) - logger.exception(error_msg) - raise StartError(error_msg) from e + self.prepare() + self.configure(db, matrix, config) + self.reload() - def stop(self) -> None: - """Stop the matrix-appservice-irc service. + def prepare(self) -> None: + """Prepare the machine. - Raises: - StopError: when encountering a SnapError + Install the snap package and create the configuration directory and file. """ - try: - cache = snap.SnapCache() - charmed_irc_bridge = cache[constants.IRC_BRIDGE_SNAP_NAME] - charmed_irc_bridge.stop() - except snap.SnapError as e: - error_msg = ( - f"An exception occurred when stopping {constants.IRC_BRIDGE_SNAP_NAME}. Reason: {e}" - ) - logger.exception(error_msg) - raise StopError(error_msg) from e - - def prepare(self) -> None: - """Prepare the machine.""" self._install_snap_package( - snap_name=constants.IRC_BRIDGE_SNAP_NAME, - snap_channel=constants.SNAP_PACKAGES[constants.IRC_BRIDGE_SNAP_NAME]["channel"], + snap_name=IRC_BRIDGE_SNAP_NAME, + snap_channel=SNAP_PACKAGES[IRC_BRIDGE_SNAP_NAME]["channel"], ) + if not IRC_BRIDGE_CONFIG_DIR_PATH.exists(): + IRC_BRIDGE_CONFIG_DIR_PATH.mkdir(parents=True) + logger.info("Created directory %s", IRC_BRIDGE_CONFIG_DIR_PATH) + shutil.copy(IRC_BRIDGE_TEMPLATE_CONFIG_FILE_PATH, IRC_BRIDGE_CONFIG_DIR_PATH) + + if not IRC_BRIDGE_UNIT_FILE_PATH.exists() or not IRC_BRIDGE_TARGET_FILE_PATH.exists(): + shutil.copy(IRC_BRIDGE_TEMPLATE_UNIT_FILE_PATH, SYSTEMD_DIR_PATH) + shutil.copy(IRC_BRIDGE_TEMPLATE_TARGET_FILE_PATH, SYSTEMD_DIR_PATH) + systemd.daemon_reload() + systemd.service_enable(IRC_BRIDGE_SNAP_NAME) + def _install_snap_package( self, snap_name: str, snap_channel: str, refresh: bool = False ) -> None: @@ -129,52 +127,123 @@ def _install_snap_package( logger.exception(error_msg) raise InstallError(error_msg) from e - def _write(self, path: pathlib.Path, source: str) -> None: - """Pushes a file to the unit. + def configure( + self, db: DatasourcePostgreSQL, matrix: DatasourceMatrix, config: CharmConfig + ) -> None: + """Configure the service. Args: - path: The path of the file - source: The contents of the file to be pushed + db: the database configuration + matrix: the matrix configuration + config: the charm configuration """ - path.write_text(source, encoding="utf-8") - logger.info("Pushed file %s", path) - - def _generate_PEM_file_local(self) -> str: - """Generate the PEM file content. + self._generate_pem_file_local() + self._generate_app_registration_local(matrix, config) + self._eval_conf_local(db, matrix, config) + + def _generate_pem_file_local(self) -> None: + """Generate the PEM file content.""" + pem_create_command = [ + "/bin/bash", + "-c", + f"[[ -f {IRC_BRIDGE_PEM_FILE_PATH} ]] || " + f"openssl genpkey -out {IRC_BRIDGE_PEM_FILE_PATH} " + f"-outform PEM -algorithm {IRC_BRIDGE_KEY_ALGO} -pkeyopt {IRC_BRIDGE_KEY_OPTS}", + ] + logger.info("Creating PEM file for IRC bridge.") + subprocess.run(pem_create_command, shell=True, check=True, capture_output=True) # nosec + + def _generate_app_registration_local( + self, matrix: DatasourceMatrix, config: CharmConfig + ) -> None: + """Generate the content of the app registration file. 
- Returns: - A string + Args: + matrix: the matrix configuration + config: the charm configuration """ - pass - - def _generate_conf_local(self) -> str: + app_reg_create_command = [ + "/bin/bash", + "-c", + f"[[ -f {IRC_BRIDGE_REGISTRATION_FILE_PATH} ]] || " + f"matrix-appservice-irc -r -f {IRC_BRIDGE_REGISTRATION_FILE_PATH}" + f" -u https://{matrix.host}:{IRC_BRIDGE_HEALTH_PORT} " + f"-c {IRC_BRIDGE_CONFIG_FILE_PATH} -l {config.bot_nickname}", + ] + logger.info("Creating an app registration file for IRC bridge.") + subprocess.run( + app_reg_create_command, shell=True, check=True, capture_output=True + ) # nosec + + def _eval_conf_local( + self, db: DatasourcePostgreSQL, matrix: DatasourceMatrix, config: CharmConfig + ) -> None: """Generate the content of the irc configuration file. - Returns: - A string + Args: + db: the database configuration + matrix: the matrix configuration + config: the charm configuration + + Raises: + SynapseConfigurationFileError: when encountering a KeyError from the configuration file """ - pass + with open(f"{IRC_BRIDGE_CONFIG_FILE_PATH}", "r", encoding="utf-8") as config_file: + data = yaml.safe_load(config_file) + try: + db_conn = data["database"]["connectionString"] + if db_conn == "" or db_conn != db.uri: + db_conn = data["database"]["connectionString"] + data["homeserver"]["url"] = f"https://{matrix.host}" + data["ircService"]["ident"] = config.ident_enabled + data["ircService"]["permissions"] = {} + for admin in config.bridge_admins: + data["ircService"]["permissions"][admin] = "admin" + except KeyError as e: + logger.exception("KeyError: {%s}", e) + raise exceptions.SynapseConfigurationFileError( + f"KeyError in configuration file: {e}" + ) from e + with open(f"{IRC_BRIDGE_CONFIG_FILE_PATH}", "w", encoding="utf-8") as config_file: + yaml.dump(data, config_file) - def _generate_app_registration_local(self) -> str: - """Generate the content of the app registration file. + def reload(self) -> None: + """Reload the matrix-appservice-irc service. - Returns: - A string + Check if the service is running and reload it. + + Raises: + ReloadError: when encountering a SnapError """ - pass + try: + systemd.service_reload(IRC_BRIDGE_SNAP_NAME) + except systemd.SystemdError as e: + error_msg = f"An exception occurred when reloading {IRC_BRIDGE_SNAP_NAME}." + logger.exception(error_msg) + raise ReloadError(error_msg) from e - def handle_new_db_relation_data(self) -> str: - """Handle new DB relation data. + def start(self) -> None: + """Start the matrix-appservice-irc service. - Returns: - A string + Raises: + StartError: when encountering a SnapError """ - pass + try: + systemd.service_start(IRC_BRIDGE_SNAP_NAME) + except systemd.SystemdError as e: + error_msg = f"An exception occurred when starting {IRC_BRIDGE_SNAP_NAME}." + logger.exception(error_msg) + raise StartError(error_msg) from e - def handle_new_matrix_relation_data(self) -> str: - """Handle new Matrix relation data. + def stop(self) -> None: + """Stop the matrix-appservice-irc service. - Returns: - A string + Raises: + StopError: when encountering a SnapError """ - pass + try: + systemd.service_stop(IRC_BRIDGE_SNAP_NAME) + except snap.SnapError as e: + error_msg = f"An exception occurred when stopping {IRC_BRIDGE_SNAP_NAME}." + logger.exception(error_msg) + raise StopError(error_msg) from e diff --git a/src/matrix_observer.py b/src/matrix_observer.py new file mode 100644 index 0000000..8f487dc --- /dev/null +++ b/src/matrix_observer.py @@ -0,0 +1,42 @@ +# Copyright 2024 Canonical Ltd. 
+# See LICENSE file for licensing details. + +"""Provide the DatabaseObserver class to handle database relation and state.""" + +import typing + +from ops.charm import CharmBase +from ops.framework import Object + +from charm_types import DatasourceMatrix + + +class MatrixObserver(Object): + """The Matrix relation observer.""" + + def __init__(self, charm: CharmBase, relation_name: str): + """Initialize the oserver and register event handlers. + + Args: + charm: The parent charm to attach the observer to. + relation_name: The name of the relation to observe + """ + super().__init__(charm, "matrix-observer") + self._charm = charm + self.relation_name = relation_name + + def _get_relation_data(self) -> typing.Optional[DatasourceMatrix]: + """Get matrix data from relation. + + Returns: + Dict: Information needed for setting environment variables. + """ + return DatasourceMatrix(host="localhost") + + def reconcile(self) -> typing.Optional[DatasourceMatrix]: + """Reconcile the database relation. + + Returns: + Dict: Information needed for setting environment variables. + """ + return self._get_relation_data() diff --git a/templates/config.yaml b/templates/config.yaml new file mode 100644 index 0000000..399c0eb --- /dev/null +++ b/templates/config.yaml @@ -0,0 +1,139 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +homeserver: + url: "https://ubuntu.com" + domain: "ubuntu.com" + enablePresence: true +ircService: + servers: + irc.eu.libera.chat: + name: "LiberaChat" + onlyAdditionalAddresses: false + networkId: "libera" + port: 6697 + ssl: true + sslselfsign: true + sasl: true + allowExpiredCerts: false + sendConnectionMessages: true + quitDebounce: + enabled: false + quitsPerSecond: 5 + delayMinMs: 3600000 # 1h + delayMaxMs: 7200000 # 2h + modePowerMap: + o: 50 + v: 0 + botConfig: + enabled: true + nick: "UbuntuLiberaBot" + username: "ubunbtuliberabot" + joinChannelsIfNoUsers: true + privateMessages: + enabled: true + federate: true + dynamicChannels: + enabled: true + createAlias: true + published: false + useHomeserverDirectory: true + joinRule: public + federate: true + aliasTemplate: "#libera_$CHANNEL" + membershipLists: + enabled: true + floodDelayMs: 10000 + global: + ircToMatrix: + initial: false + incremental: true + requireMatrixJoined: false + matrixToIrc: + initial: false + incremental: true + ignoreIdleUsersOnStartup: + enabled: false + idleForHours: 720 + exclude: "foobar" + matrixClients: + userTemplate: "@libera_$NICK" + displayName: "$NICK" + joinAttempts: -1 + ircClients: + nickTemplate: "$DISPLAY[m]" + allowNickChanges: true + maxClients: 30 + ipv6: + only: false + idleTimeout: 0 + reconnectIntervalMs: 5000 + concurrentReconnectLimit: 50 + lineLimit: 3 + realnameFormat: "mxid" + kickOn: + channelJoinFailure: true + ircConnectionFailure: true + userQuit: true + bridgeInfoState: + enabled: false + initial: false + ident: + enabled: false + port: 1113 + address: "::" + logging: + level: "debug" + logfile: "debug.log" + errfile: "errors.log" + toConsole: true + maxFiles: 5 + metrics: + enabled: false + port: 7001 + host: 127.0.0.1 + userActivityThresholdHours: 72 # 3 days + remoteUserAgeBuckets: + - "1h" + - "1d" + - "1w" + debugApi: + enabled: false + port: 11100 + provisioning: + enabled: false + widget: false + requestTimeoutSeconds: 300 + rules: + userIds: + exempt: + - "@doubleagent:example.com" + conflict: + - "@.*:example.com" + roomLimit: 50 + http: + port: 7700 + host: 127.0.0.1 + passwordEncryptionKeyPath: "/data/config/irc_passkey.pem" + 
matrixHandler: + eventCacheSize: 4096 + shortReplyTemplate: "$NICK: $REPLY" + longReplyTemplate: "<$NICK> \"$ORIGINAL\" <- $REPLY" + shortReplyTresholdSeconds: 300 + userActivity: + minUserActiveDays: 1 + inactiveAfterDays: 30 + ircHandler: + mapIrcMentionsToMatrix: "on" # This can be "on", "off", "force-off". + powerLevelGracePeriod: 1000 + perRoomConfig: + enabled: false +advanced: + maxHttpSockets: 1000 + maxTxnSize: 10000000 +sentry: + enabled: false + dsn: "https://@sentry.io/" +database: + engine: "postgres" + connectionString: "" diff --git a/templates/matrix-appservice-irc.service b/templates/matrix-appservice-irc.service new file mode 100644 index 0000000..b1fb76c --- /dev/null +++ b/templates/matrix-appservice-irc.service @@ -0,0 +1,18 @@ +[Unit] +Description=Synapse main + +# This service should be restarted when the synapse target is restarted. +PartOf=matrix-appservice-irc.target +ReloadPropagatedFrom=matrix-appservice-irc.target + +[Service] +Type=notify +NotifyAccess=main +ExecStart=matrix-appservice-irc -c /etc/matrix-appservice-irc/config.yaml -f /etc/matrix-appservice-irc/appservice-registration-irc.yaml -p 5446 +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +RestartSec=3 +SyslogIdentifier=matrix-appservice-irc + +[Install] +WantedBy=matrix-appservice-irc.target diff --git a/templates/matrix-appservice-irc.target b/templates/matrix-appservice-irc.target new file mode 100644 index 0000000..a9b6ec9 --- /dev/null +++ b/templates/matrix-appservice-irc.target @@ -0,0 +1,6 @@ +[Unit] +Description=IRC Bridge target +After=network.target + +[Install] +WantedBy=multi-user.target diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index f19d371..fbf516f 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -4,12 +4,9 @@ """Helper functions for the integration tests.""" -import json -import pathlib import random import string import tempfile -import typing import ops from pytest_operator.plugin import OpsTest @@ -75,7 +72,7 @@ async def run_on_unit(ops_test: OpsTest, unit_name: str, command: str) -> str: return stdout -# pylint: disable=too-many-arguments +# pylint: disable=too-many-positional-arguments,too-many-arguments async def push_to_unit( ops_test: OpsTest, unit: ops.model.Unit, @@ -130,3 +127,15 @@ async def dispatch_to_unit( "--", f"export JUJU_DISPATCH_PATH=hooks/{hook_name}; ./dispatch", ) + + +async def set_config(ops_test: OpsTest, app_name: str, config: dict): + """Set the charm configuration. + + Args: + ops_test: The ops test framework instance + app_name: the name of the application to set the configuration + config: the configuration to set + """ + assert ops_test.model + await ops_test.model.applications[app_name].set_config(config=config) diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index 759af2a..c990464 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -5,14 +5,11 @@ """Integration tests.""" import logging -import time -import typing import ops import pytest from pytest_operator.plugin import OpsTest -import constants import tests.integration.helpers logger = logging.getLogger(__name__) @@ -20,30 +17,19 @@ @pytest.mark.asyncio @pytest.mark.abort_on_fail -async def test_lifecycle(app: ops.model.Application, ops_test: OpsTest): +async def test_lifecycle_before_relations(app: ops.model.Application, ops_test: OpsTest): """ arrange: build and deploy the charm. act: nothing. - assert: that the charm ends up in an active state. 
+ assert: that the charm ends up in blocked state because of missing relations. """ + # Set config so the charm can start + config = {"bridge_admins": "admin:example.com", "bot_nickname": "bot"} + await tests.integration.helpers.set_config(ops_test, app.name, config) # Application actually does have units unit = app.units[0] # type: ignore # Mypy has difficulty with ActiveStatus - assert unit.workload_status == ops.model.ActiveStatus.name # type: ignore - - await tests.integration.helpers.dispatch_to_unit(ops_test, unit, "stop") - time.sleep(5) - _, service_status, _ = await ops_test.juju( - "exec", "--unit", unit.name, "snap services matrix-appservice-irc" - ) - logger.info(service_status) - assert "inactive" in service_status - - await tests.integration.helpers.dispatch_to_unit(ops_test, unit, "start") - time.sleep(5) - _, service_status, _ = await ops_test.juju( - "exec", "--unit", unit.name, "snap services matrix-appservice-irc" - ) - logger.info(service_status) - assert "active" in service_status + assert unit.workload_status == ops.model.BlockedStatus.name # type: ignore + # Assert part of the message + assert ops.BlockedStatus("Database relation not found") == app.status diff --git a/tests/unit/test_charm_types.py b/tests/unit/test_charm_types.py new file mode 100644 index 0000000..d3acd2e --- /dev/null +++ b/tests/unit/test_charm_types.py @@ -0,0 +1,34 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Tests for the charm_types module.""" + +from secrets import token_hex + +from charm_types import DatasourcePostgreSQL + + +def test_datasource_postgresql(): + """Test the DatasourcePostgreSQL class. + + arrange: Create a DatasourcePostgreSQL instance. + act: Access the instance attributes. + assert: The attributes are the same as the input values + """ + user = "test_user" + password = token_hex(16) + host = "localhost" + port = "5432" + db = "test_db" + uri = f"postgres://{user}:{password}@{host}:{port}/{db}" + + datasource = DatasourcePostgreSQL( + user=user, password=password, host=host, port=port, db=db, uri=uri + ) + + assert datasource.user == user + assert datasource.password == password + assert datasource.host == host + assert datasource.port == port + assert datasource.db == db + assert datasource.uri == uri diff --git a/tests/unit/test_database_observer.py b/tests/unit/test_database_observer.py new file mode 100644 index 0000000..a4aecf8 --- /dev/null +++ b/tests/unit/test_database_observer.py @@ -0,0 +1,102 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Database observer unit tests.""" + +from secrets import token_hex +from unittest.mock import patch + +import ops +import pytest +from ops.testing import Harness +from pydantic import ValidationError + +from charm_types import DatasourcePostgreSQL +from database_observer import DatabaseObserver + +REQUIRER_METADATA = """ +name: observer-charm +requires: + database: + interface: postgresql_client +""" + + +class ObservedCharm(ops.CharmBase): + """Class for requirer charm testing.""" + + def __init__(self, *args): + """Construct. + + Args: + args: Variable list of positional arguments passed to the parent constructor. + """ + super().__init__(*args) + self.database = DatabaseObserver(self, "database") + + def reconcile(self): + """Reconcile method.""" + + +def test_database_created_calls_reconcile(): + """ + arrange: set up a charm and a database relation. + act: trigger a database created event. + assert: the reconcile method is called. 
+ """ + harness = Harness(ObservedCharm, meta=REQUIRER_METADATA) + harness.begin() + harness.add_relation("database", "database-provider") + relation = harness.charm.framework.model.get_relation("database", 0) + + with patch.object( + harness.charm.database._charm, "reconcile" # pylint: disable=protected-access + ) as mock_reconcile: + harness.charm.database.database.on.database_created.emit(relation) + assert mock_reconcile.called + + +def test_get_db(): + """ + arrange: set up a charm and a database relation with an empty databag. + act: populate the relation databag. + assert: the db matches the databag content. + """ + password = token_hex(16) + harness = Harness(ObservedCharm, meta=REQUIRER_METADATA) + harness.begin() + harness.add_relation( + "database", + "database-provider", + app_data={ + "database": "ircbridge", + "endpoints": "postgresql-k8s-primary.local:5432", + "password": password, + "username": "user1", + }, + ) + + assert harness.charm.database.get_db() == ( + DatasourcePostgreSQL( + user="user1", + password=password, + host="postgresql-k8s-primary.local", + port="5432", + db="ircbridge", + uri=f"postgres://user1:{password}@postgresql-k8s-primary.local:5432/ircbridge", + ) + ) + + +def test_get_db_when_no_relation_data(): + """ + arrange: set up a charm and a database relation with an empty databag. + act:. + assert: the db is None. + """ + harness = Harness(ObservedCharm, meta=REQUIRER_METADATA) + harness.begin() + harness.add_relation("database", "database-provider") + + with pytest.raises(ValidationError): + harness.charm.database.get_db() # pylint: disable=pointless-statement diff --git a/tests/unit/test_irc.py b/tests/unit/test_irc.py new file mode 100644 index 0000000..399803b --- /dev/null +++ b/tests/unit/test_irc.py @@ -0,0 +1,451 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Tests for the IRC bridge service.""" + +import builtins +import pathlib +import shutil +import subprocess # nosec +from secrets import token_hex +from unittest.mock import MagicMock + +import pytest +import yaml +from charms.operator_libs_linux.v1 import systemd +from charms.operator_libs_linux.v2 import snap + +from charm_types import CharmConfig, DatasourceMatrix, DatasourcePostgreSQL +from constants import ( + IRC_BRIDGE_CONFIG_DIR_PATH, + IRC_BRIDGE_CONFIG_FILE_PATH, + IRC_BRIDGE_HEALTH_PORT, + IRC_BRIDGE_KEY_ALGO, + IRC_BRIDGE_KEY_OPTS, + IRC_BRIDGE_PEM_FILE_PATH, + IRC_BRIDGE_REGISTRATION_FILE_PATH, + IRC_BRIDGE_SNAP_NAME, + IRC_BRIDGE_TEMPLATE_CONFIG_FILE_PATH, + IRC_BRIDGE_TEMPLATE_TARGET_FILE_PATH, + IRC_BRIDGE_TEMPLATE_UNIT_FILE_PATH, + SYSTEMD_DIR_PATH, +) +from irc import InstallError, IRCBridgeService, ReloadError, StartError, StopError + + +@pytest.fixture(name="irc_bridge_service") +def irc_bridge_service_fixture(): + """Return a new instance of the IRCBridgeService.""" + return IRCBridgeService() + + +def test_reconcile_calls_prepare_configure_and_reload_methods(irc_bridge_service, mocker): + """Test that the reconcile method calls the prepare, configure, and reload methods. + + arrange: Prepare mocks for the prepare, configure, and reload methods. + act: Call the reconcile method. + assert: Ensure that the prepare, configure, and reload methods were called + exactly once. 
+ """ + mock_prepare = mocker.patch.object(irc_bridge_service, "prepare") + mock_configure = mocker.patch.object(irc_bridge_service, "configure") + mock_reload = mocker.patch.object(irc_bridge_service, "reload") + + password = token_hex(16) + db = DatasourcePostgreSQL( + user="test_user", + password=password, + host="localhost", + port="5432", + db="test_db", + uri=f"postgres://test_user:{password}@localhost:5432/test_db", + ) + matrix = DatasourceMatrix(host="matrix.example.com") + config = CharmConfig( + ident_enabled=True, + bot_nickname="my_bot", + bridge_admins="admin1:example.com,admin2:example.com", + ) + + irc_bridge_service.reconcile(db, matrix, config) + + mock_prepare.assert_called_once() + mock_configure.assert_called_once_with(db, matrix, config) + mock_reload.assert_called_once() + + +def test_prepare_installs_snap_package_and_creates_configuration_files(irc_bridge_service, mocker): + """Test that the prepare method installs the snap package and creates configuration files. + + arrange: Prepare mocks for the _install_snap_package, shutil.copy, pathlib.Path.mkdir, + systemd.daemon_reload, and systemd.service_enable methods. + act: Call the prepare method. + assert: Ensure that the _install_snap_package, shutil.copy, pathlib.Path.mkdir, + systemd.daemon_reload, and systemd.service_enable methods were called exactly once. + """ + mock_install_snap_package = mocker.patch.object(irc_bridge_service, "_install_snap_package") + mock_copy = mocker.patch.object(shutil, "copy") + mock_mkdir = mocker.patch.object(pathlib.Path, "mkdir") + mock_daemon_reload = mocker.patch.object(systemd, "daemon_reload") + mock_service_enable = mocker.patch.object(systemd, "service_enable") + mocker.patch.object(pathlib.Path, "exists", return_value=False) + + irc_bridge_service.prepare() + + mock_install_snap_package.assert_called_once_with( + snap_name=IRC_BRIDGE_SNAP_NAME, snap_channel="edge" + ) + mock_mkdir.assert_called_once_with(parents=True) + copy_calls = [ + mocker.call( + IRC_BRIDGE_TEMPLATE_CONFIG_FILE_PATH, + IRC_BRIDGE_CONFIG_DIR_PATH, + ), + mocker.call( + IRC_BRIDGE_TEMPLATE_UNIT_FILE_PATH, + SYSTEMD_DIR_PATH, + ), + mocker.call( + IRC_BRIDGE_TEMPLATE_TARGET_FILE_PATH, + SYSTEMD_DIR_PATH, + ), + ] + mock_copy.assert_has_calls(copy_calls) + mock_daemon_reload.assert_called_once() + mock_service_enable.assert_called_once_with(IRC_BRIDGE_SNAP_NAME) + + +def test_prepare_does_not_copy_files_if_already_exist(irc_bridge_service, mocker): + """Test that the prepare method does not copy files if they already exist. + + arrange: Prepare mocks for the _install_snap_package, shutil.copy, pathlib.Path.mkdir, + systemd.daemon_reload, and systemd.service_enable methods. Mock the exists method to return + True. + act: Call the prepare method. + assert: Ensure that the _install_snap_package, shutil.copy, pathlib.Path.mkdir, + systemd.daemon_reload, and systemd.service_enable methods were called exactly once. 
+ """ + mock_install_snap_package = mocker.patch.object(irc_bridge_service, "_install_snap_package") + mock_copy = mocker.patch.object(shutil, "copy") + mock_mkdir = mocker.patch.object(pathlib.Path, "mkdir") + mock_daemon_reload = mocker.patch.object(systemd, "daemon_reload") + mock_service_enable = mocker.patch.object(systemd, "service_enable") + + mocker.patch.object(pathlib.Path, "exists", return_value=True) + + irc_bridge_service.prepare() + + mock_install_snap_package.assert_called_once_with( + snap_name=IRC_BRIDGE_SNAP_NAME, snap_channel="edge" + ) + mock_mkdir.assert_not_called() + mock_copy.assert_not_called() + mock_daemon_reload.assert_not_called() + mock_service_enable.assert_called_once_with(IRC_BRIDGE_SNAP_NAME) + + +def test_prepare_raises_install_error_if_snap_installation_fails(irc_bridge_service, mocker): + """Test that the prepare method raises an InstallError if the snap installation fails. + + arrange: Prepare a mock for the _install_snap_package method that raises an InstallError. + act: Call the prepare method. + assert: Ensure that an InstallError is raised. + """ + mock_install_snap_package = mocker.patch.object( + irc_bridge_service, "_install_snap_package", side_effect=InstallError("oops") + ) + + with pytest.raises(InstallError): + irc_bridge_service.prepare() + + mock_install_snap_package.assert_called_once_with( + snap_name=IRC_BRIDGE_SNAP_NAME, snap_channel="edge" + ) + + +def test_install_snap_package_installs_snap_if_not_present(irc_bridge_service, mocker): + """Test that the _install_snap_package method installs the snap package if it is not present. + + arrange: Prepare mocks for the SnapCache and SnapPackage classes and the ensure method. + act: Call the _install_snap_package method. + assert: Ensure that the SnapCache and SnapPackage classes were called exactly once and that + the ensure method was called with the correct arguments. + """ + mock_snap_cache = mocker.patch.object(snap, "SnapCache") + mock_snap_package = MagicMock() + mock_snap_package.present = False + mock_snap_cache.return_value = {IRC_BRIDGE_SNAP_NAME: mock_snap_package} + mock_ensure = mocker.patch.object(mock_snap_package, "ensure") + + irc_bridge_service._install_snap_package( # pylint: disable=protected-access + snap_name=IRC_BRIDGE_SNAP_NAME, snap_channel="edge" + ) + + mock_snap_cache.assert_called_once() + mock_ensure.assert_called_once_with(snap.SnapState.Latest, channel="edge") + + +def test_install_snap_package_does_not_install_snap_if_already_present(irc_bridge_service, mocker): + """Test that the _install_snap_package method does not install the snap package if present. + + arrange: Prepare mocks for the SnapCache and SnapPackage classes and the ensure method. + act: Call the _install_snap_package method. + assert: Ensure that the SnapCache and SnapPackage classes were called exactly once and that + the ensure method was not called. 
+ """ + mock_snap_cache = mocker.patch.object(snap, "SnapCache") + mock_snap_package = MagicMock() + mock_snap_package.present = True + mock_snap_cache.return_value = {IRC_BRIDGE_SNAP_NAME: mock_snap_package} + mock_ensure = mocker.patch.object(mock_snap_package, "ensure") + + irc_bridge_service._install_snap_package( # pylint: disable=protected-access + snap_name=IRC_BRIDGE_SNAP_NAME, snap_channel="edge" + ) + + mock_snap_cache.assert_called_once() + mock_ensure.assert_not_called() + + +def test_install_snap_package_refreshes_snap_if_already_present(irc_bridge_service, mocker): + """Test that the _install_snap_package method refreshes the snap if it is already present. + + arrange: Prepare mocks for the SnapCache and SnapPackage classes and the ensure method. + act: Call the _install_snap_package method with the refresh argument set to True. + assert: Ensure that the SnapCache and SnapPackage classes were called exactly once and that + the ensure method was called with the correct arguments. + """ + mock_snap_cache = mocker.patch.object(snap, "SnapCache") + mock_snap_package = MagicMock() + mock_snap_package.present = True + mock_snap_cache.return_value = {IRC_BRIDGE_SNAP_NAME: mock_snap_package} + mock_ensure = mocker.patch.object(mock_snap_package, "ensure") + + irc_bridge_service._install_snap_package( # pylint: disable=protected-access + snap_name=IRC_BRIDGE_SNAP_NAME, snap_channel="edge", refresh=True + ) + + mock_snap_cache.assert_called_once() + mock_ensure.assert_called_once_with(snap.SnapState.Latest, channel="edge") + + +def test_install_snap_package_raises_install_error_if_snap_installation_fails( + irc_bridge_service, mocker +): + """Test that the _install_snap_package method raises an InstallError if the snap install fails. + + arrange: Prepare mocks for the SnapCache and SnapPackage classes and the ensure method. + act: Call the _install_snap_package method with the refresh argument set to True. + assert: Ensure that an InstallError is raised. + """ + mock_snap_cache = mocker.patch.object(snap, "SnapCache") + mock_snap_package = MagicMock() + mock_snap_package.present = False + mock_snap_cache.return_value = {IRC_BRIDGE_SNAP_NAME: mock_snap_package} + mock_ensure = mocker.patch.object(mock_snap_package, "ensure", side_effect=snap.SnapError) + + with pytest.raises(InstallError): + irc_bridge_service._install_snap_package( # pylint: disable=protected-access + snap_name=IRC_BRIDGE_SNAP_NAME, snap_channel="edge" + ) + + mock_snap_cache.assert_called_once() + mock_ensure.assert_called_once_with(snap.SnapState.Latest, channel="edge") + + +def test_configure_generates_pem_file_local(irc_bridge_service, mocker): + """Test that the _generate_pem_file_local method generates the PEM file. + + arrange: Prepare a mock for the subprocess.run method. + act: Call the _generate_pem_file_local method.j + assert: Ensure that the subprocess.run method was called with the correct arguments. 
+ """ + mock_run = mocker.patch.object(subprocess, "run") + + irc_bridge_service._generate_pem_file_local() # pylint: disable=protected-access + + # pylint: disable=duplicate-code + mock_run.assert_called_once_with( + [ + "/bin/bash", + "-c", + f"[[ -f {IRC_BRIDGE_PEM_FILE_PATH} ]] || " + f"openssl genpkey -out {IRC_BRIDGE_PEM_FILE_PATH} " + f"-outform PEM -algorithm {IRC_BRIDGE_KEY_ALGO} -pkeyopt {IRC_BRIDGE_KEY_OPTS}", + ], + shell=True, # nosec + check=True, + capture_output=True, + ) + # pylint: enable=duplicate-code + + +def test_configure_generates_app_registration_local(irc_bridge_service, mocker): + """Test that the _generate_app_registration_local method generates the app registration file. + + arrange: Prepare a mock for the subprocess.run method. + act: Call the _generate_app_registration_local method. + assert: Ensure that the subprocess.run method was called with the correct arguments. + """ + mock_run = mocker.patch.object(subprocess, "run") + + matrix = DatasourceMatrix(host="matrix.example.com") + config = CharmConfig( + ident_enabled=True, + bot_nickname="my_bot", + bridge_admins="admin1:example.com,admin2:example.com", + ) + + irc_bridge_service._generate_app_registration_local( # pylint: disable=protected-access + matrix, config + ) + + # pylint: disable=duplicate-code + mock_run.assert_called_once_with( + [ + "/bin/bash", + "-c", + f"[[ -f {IRC_BRIDGE_REGISTRATION_FILE_PATH} ]] || " + f"matrix-appservice-irc -r -f {IRC_BRIDGE_REGISTRATION_FILE_PATH}" + f" -u https://{matrix.host}:{IRC_BRIDGE_HEALTH_PORT} " + f"-c {IRC_BRIDGE_CONFIG_FILE_PATH} -l {config.bot_nickname}", + ], + shell=True, # nosec + check=True, + capture_output=True, + ) + # pylint: enable=duplicate-code + + +def test_configure_evaluates_configuration_file_local(irc_bridge_service, mocker): + """Test that the _eval_conf_local method evaluates the configuration file. + + arrange: Prepare mocks for the open, yaml.safe_load, and yaml.dump methods. + act: Call the _eval_conf_local method. + assert: Ensure that the open, yaml.safe_load, and yaml.dump methods were called as expected. + """ + mock_open = mocker.patch.object(builtins, "open") + mock_safe_load = mocker.patch.object(yaml, "safe_load") + mock_dump = mocker.patch.object(yaml, "dump") + + password = token_hex(16) + db = DatasourcePostgreSQL( + user="test_user", + password=password, + host="localhost", + port="5432", + db="test_db", + uri=f"postgres://test_user:{password}@localhost:5432/test_db", + ) + matrix = DatasourceMatrix(host="matrix.example.com") + config = CharmConfig( + ident_enabled=True, + bot_nickname="my_bot", + bridge_admins="admin1:example.com,admin2:example.com", + ) + + irc_bridge_service._eval_conf_local(db, matrix, config) # pylint: disable=protected-access + + calls = [ + mocker.call(f"{IRC_BRIDGE_CONFIG_FILE_PATH.absolute()}", "r", encoding="utf-8"), + mocker.call(f"{IRC_BRIDGE_CONFIG_FILE_PATH.absolute()}", "w", encoding="utf-8"), + ] + + mock_open.assert_has_calls(calls, any_order=True) + mock_safe_load.assert_called_once_with( + mock_open().__enter__() # pylint: disable=unnecessary-dunder-call + ) + mock_dump.assert_called_once_with( + mock_safe_load(), mock_open().__enter__() # pylint: disable=unnecessary-dunder-call + ) + + +def test_reload_reloads_matrix_appservice_irc_service(irc_bridge_service, mocker): + """Test that the reload method reloads the matrix-appservice-irc service. + + arrange: Prepare a mock for the systemd.service_reload method. + act: Call the reload method. 
+ assert: Ensure that the systemd.service_reload method was called with the correct arguments. + """ + mock_service_reload = mocker.patch.object(systemd, "service_reload") + + irc_bridge_service.reload() + + mock_service_reload.assert_called_once_with(IRC_BRIDGE_SNAP_NAME) + + +def test_reload_raises_reload_error_if_reload_fails(irc_bridge_service, mocker): + """Test that the reload method raises a ReloadError if the service reload fails. + + arrange: Prepare a mock for the systemd.service_reload method that raises a SystemdError. + act: Call the reload method. + assert: Ensure that a ReloadError is raised. + """ + mock_service_reload = mocker.patch.object( + systemd, "service_reload", side_effect=systemd.SystemdError + ) + + with pytest.raises(ReloadError): + irc_bridge_service.reload() + + mock_service_reload.assert_called_once_with(IRC_BRIDGE_SNAP_NAME) + + +def test_start_starts_matrix_appservice_irc_service(irc_bridge_service, mocker): + """Test that the start method starts the matrix-appservice-irc service. + + arrange: Prepare a mock for the systemd.service_start method. + act: Call the start method. + assert: Ensure that the systemd.service_start method was called with the correct arguments. + """ + mock_service_start = mocker.patch.object(systemd, "service_start") + + irc_bridge_service.start() + + mock_service_start.assert_called_once_with(IRC_BRIDGE_SNAP_NAME) + + +def test_start_raises_start_error_if_start_fails(irc_bridge_service, mocker): + """Test that the start method raises a StartError if the service start fails. + + arrange: Prepare a mock for the systemd.service_start method that raises a SystemdError. + act: Call the start method. + assert: Ensure that a StartError is raised. + """ + mock_service_start = mocker.patch.object( + systemd, "service_start", side_effect=systemd.SystemdError + ) + + with pytest.raises(StartError): + irc_bridge_service.start() + + mock_service_start.assert_called_once_with(IRC_BRIDGE_SNAP_NAME) + + +def test_stop_stops_matrix_appservice_irc_service(irc_bridge_service, mocker): + """Test that the stop method stops the matrix-appservice-irc service. + + arrange: Prepare a mock for the systemd.service_stop method. + act: Call the stop method. + assert: Ensure that the systemd.service_stop method was called with the correct arguments. + """ + mock_service_stop = mocker.patch.object(systemd, "service_stop") + + irc_bridge_service.stop() + + mock_service_stop.assert_called_once_with(IRC_BRIDGE_SNAP_NAME) + + +def test_stop_raises_stop_error_if_stop_fails(irc_bridge_service, mocker): + """Test that the stop method raises a StopError if the service stop fails. + + arrange: Prepare a mock for the systemd.service_stop method that raises a SystemdError. + act: Call the stop method. + assert: Ensure that a StopError is raised. 
+ """ + mock_service_stop = mocker.patch.object(systemd, "service_stop", side_effect=snap.SnapError) + + with pytest.raises(StopError): + irc_bridge_service.stop() + + mock_service_stop.assert_called_once_with(IRC_BRIDGE_SNAP_NAME) diff --git a/tox.ini b/tox.ini index 1495890..9d2d784 100644 --- a/tox.ini +++ b/tox.ini @@ -9,8 +9,7 @@ envlist = lint, unit, static, coverage-report [vars] src_path = {toxinidir}/src/ tst_path = {toxinidir}/tests/ -lib_path = {toxinidir}/lib/charms/bind -all_path = {[vars]src_path} {[vars]tst_path} {[vars]lib_path} +all_path = {[vars]src_path} {[vars]tst_path} [testenv] setenv = @@ -74,6 +73,7 @@ description = Run unit tests deps = coverage[toml] pytest + pytest-mock -r{toxinidir}/requirements.txt commands = coverage run --source={[vars]src_path},{[vars]lib_path} \