From bb3f738a24bc11dcebb613f3edbeaac29f2b9a26 Mon Sep 17 00:00:00 2001 From: "Amanda H. L. de Andrade Katz" Date: Mon, 17 Jul 2023 08:59:12 -0300 Subject: [PATCH] feat(ISD-843): Add Postgresql integration (#13) --- charmcraft.yaml | 4 + .../data_platform_libs/v0/data_interfaces.py | 1407 +++++++++++++++++ metadata.yaml | 4 + requirements.txt | 4 +- src/charm.py | 80 +- src/charm_state.py | 39 +- src/charm_types.py | 20 +- src/database_client.py | 117 ++ src/database_observer.py | 132 ++ src/exceptions.py | 40 +- src/pebble.py | 100 ++ src/synapse.py | 65 +- tests/integration/conftest.py | 33 +- tests/integration/test_charm.py | 6 +- tests/unit/conftest.py | 52 +- tests/unit/test_charm.py | 105 +- tests/unit/test_database.py | 267 ++++ tests/unit/test_reset_instance_action.py | 135 ++ 18 files changed, 2393 insertions(+), 217 deletions(-) create mode 100644 lib/charms/data_platform_libs/v0/data_interfaces.py create mode 100644 src/database_client.py create mode 100644 src/database_observer.py create mode 100644 src/pebble.py create mode 100644 tests/unit/test_database.py create mode 100644 tests/unit/test_reset_instance_action.py diff --git a/charmcraft.yaml b/charmcraft.yaml index 59c6e35a..d40f1824 100644 --- a/charmcraft.yaml +++ b/charmcraft.yaml @@ -12,3 +12,7 @@ bases: run-on: - name: ubuntu channel: "22.04" +parts: + charm: + charm-binary-python-packages: + - psycopg2-binary==2.9.6 diff --git a/lib/charms/data_platform_libs/v0/data_interfaces.py b/lib/charms/data_platform_libs/v0/data_interfaces.py new file mode 100644 index 00000000..10bda6db --- /dev/null +++ b/lib/charms/data_platform_libs/v0/data_interfaces.py @@ -0,0 +1,1407 @@ +# Copyright 2023 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Library to manage the relation for the data-platform products. + +This library contains the Requires and Provides classes for handling the relation +between an application and multiple managed application supported by the data-team: +MySQL, Postgresql, MongoDB, Redis, and Kafka. + +### Database (MySQL, Postgresql, MongoDB, and Redis) + +#### Requires Charm +This library is a uniform interface to a selection of common database +metadata, with added custom events that add convenience to database management, +and methods to consume the application related data. + + +Following an example of using the DatabaseCreatedEvent, in the context of the +application charm code: + +```python + +from charms.data_platform_libs.v0.data_interfaces import ( + DatabaseCreatedEvent, + DatabaseRequires, +) + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Charm events defined in the database requires charm library. 
+ self.database = DatabaseRequires(self, relation_name="database", database_name="database") + self.framework.observe(self.database.on.database_created, self._on_database_created) + + def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + + # Start application with rendered configuration + self._start_application(config_file) + + # Set active status + self.unit.status = ActiveStatus("received database credentials") +``` + +As shown above, the library provides some custom events to handle specific situations, +which are listed below: + +- database_created: event emitted when the requested database is created. +- endpoints_changed: event emitted when the read/write endpoints of the database have changed. +- read_only_endpoints_changed: event emitted when the read-only endpoints of the database + have changed. Event is not triggered if read/write endpoints changed too. + +If it is needed to connect multiple database clusters to the same relation endpoint +the application charm can implement the same code as if it would connect to only +one database cluster (like the above code example). + +To differentiate multiple clusters connected to the same relation endpoint +the application charm can use the name of the remote application: + +```python + +def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Get the remote app name of the cluster that triggered this event + cluster = event.relation.app.name +``` + +It is also possible to provide an alias for each different database cluster/relation. + +So, it is possible to differentiate the clusters in two ways. +The first is to use the remote application name, i.e., `event.relation.app.name`, as above. + +The second way is to use different event handlers to handle each cluster events. +The implementation would be something like the following code: + +```python + +from charms.data_platform_libs.v0.data_interfaces import ( + DatabaseCreatedEvent, + DatabaseRequires, +) + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Define the cluster aliases and one handler for each cluster database created event. + self.database = DatabaseRequires( + self, + relation_name="database", + database_name="database", + relations_aliases = ["cluster1", "cluster2"], + ) + self.framework.observe( + self.database.on.cluster1_database_created, self._on_cluster1_database_created + ) + self.framework.observe( + self.database.on.cluster2_database_created, self._on_cluster2_database_created + ) + + def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster1 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... + + def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster2 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... + +``` + +When it's needed to check whether a plugin (extension) is enabled on the PostgreSQL +charm, you can use the is_postgresql_plugin_enabled method. 
To use that, you need to +add the following dependency to your charmcraft.yaml file: + +```yaml + +parts: + charm: + charm-binary-python-packages: + - psycopg[binary] + +``` + +### Provider Charm + +Following an example of using the DatabaseRequestedEvent, in the context of the +database charm code: + +```python +from charms.data_platform_libs.v0.data_interfaces import DatabaseProvides + +class SampleCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + # Charm events defined in the database provides charm library. + self.provided_database = DatabaseProvides(self, relation_name="database") + self.framework.observe(self.provided_database.on.database_requested, + self._on_database_requested) + # Database generic helper + self.database = DatabaseHelper() + + def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: + # Handle the event triggered by a new database requested in the relation + # Retrieve the database name using the charm library. + db_name = event.database + # generate a new user credential + username = self.database.generate_user() + password = self.database.generate_password() + # set the credentials for the relation + self.provided_database.set_credentials(event.relation.id, username, password) + # set other variables for the relation event.set_tls("False") +``` +As shown above, the library provides a custom event (database_requested) to handle +the situation when an application charm requests a new database to be created. +It's preferred to subscribe to this event instead of relation changed event to avoid +creating a new database when other information other than a database name is +exchanged in the relation databag. + +### Kafka + +This library is the interface to use and interact with the Kafka charm. This library contains +custom events that add convenience to manage Kafka, and provides methods to consume the +application related data. + +#### Requirer Charm + +```python + +from charms.data_platform_libs.v0.data_interfaces import ( + BootstrapServerChangedEvent, + KafkaRequires, + TopicCreatedEvent, +) + +class ApplicationCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + self.kafka = KafkaRequires(self, "kafka_client", "test-topic") + self.framework.observe( + self.kafka.on.bootstrap_server_changed, self._on_kafka_bootstrap_server_changed + ) + self.framework.observe( + self.kafka.on.topic_created, self._on_kafka_topic_created + ) + + def _on_kafka_bootstrap_server_changed(self, event: BootstrapServerChangedEvent): + # Event triggered when a bootstrap server was changed for this application + + new_bootstrap_server = event.bootstrap_server + ... + + def _on_kafka_topic_created(self, event: TopicCreatedEvent): + # Event triggered when a topic was created for this application + username = event.username + password = event.password + tls = event.tls + tls_ca= event.tls_ca + bootstrap_server event.bootstrap_server + consumer_group_prefic = event.consumer_group_prefix + zookeeper_uris = event.zookeeper_uris + ... + +``` + +As shown above, the library provides some custom events to handle specific situations, +which are listed below: + +- topic_created: event emitted when the requested topic is created. +- bootstrap_server_changed: event emitted when the bootstrap server have changed. +- credential_changed: event emitted when the credentials of Kafka changed. + +### Provider Charm + +Following the previous example, this is an example of the provider charm. 
+ +```python +class SampleCharm(CharmBase): + +from charms.data_platform_libs.v0.data_interfaces import ( + KafkaProvides, + TopicRequestedEvent, +) + + def __init__(self, *args): + super().__init__(*args) + + # Default charm events. + self.framework.observe(self.on.start, self._on_start) + + # Charm events defined in the Kafka Provides charm library. + self.kafka_provider = KafkaProvides(self, relation_name="kafka_client") + self.framework.observe(self.kafka_provider.on.topic_requested, self._on_topic_requested) + # Kafka generic helper + self.kafka = KafkaHelper() + + def _on_topic_requested(self, event: TopicRequestedEvent): + # Handle the on_topic_requested event. + + topic = event.topic + relation_id = event.relation.id + # set connection info in the databag relation + self.kafka_provider.set_bootstrap_server(relation_id, self.kafka.get_bootstrap_server()) + self.kafka_provider.set_credentials(relation_id, username=username, password=password) + self.kafka_provider.set_consumer_group_prefix(relation_id, ...) + self.kafka_provider.set_tls(relation_id, "False") + self.kafka_provider.set_zookeeper_uris(relation_id, ...) + +``` +As shown above, the library provides a custom event (topic_requested) to handle +the situation when an application charm requests a new topic to be created. +It is preferred to subscribe to this event instead of relation changed event to avoid +creating a new topic when other information other than a topic name is +exchanged in the relation databag. +""" + +import json +import logging +from abc import ABC, abstractmethod +from collections import namedtuple +from datetime import datetime +from typing import List, Optional + +from ops.charm import ( + CharmBase, + CharmEvents, + RelationChangedEvent, + RelationEvent, + RelationJoinedEvent, +) +from ops.framework import EventSource, Object +from ops.model import Relation + +# The unique Charmhub library identifier, never change it +LIBID = "6c3e6b6680d64e9c89e611d1a15f65be" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 13 + +PYDEPS = ["ops>=2.0.0"] + +logger = logging.getLogger(__name__) + +Diff = namedtuple("Diff", "added changed deleted") +Diff.__doc__ = """ +A tuple for storing the diff between two data mappings. + +added - keys that were added +changed - keys that still exist but have new values +deleted - key that were deleted""" + + +def diff(event: RelationChangedEvent, bucket: str) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + bucket: bucket of the databag (app or unit) + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + # Retrieve the old data from the data key in the application relation databag. + old_data = json.loads(event.relation.data[bucket].get("data", "{}")) + # Retrieve the new data from the event relation databag. + new_data = { + key: value for key, value in event.relation.data[event.app].items() if key != "data" + } + + # These are the keys that were added to the databag and triggered this event. + added = new_data.keys() - old_data.keys() + # These are the keys that were removed from the databag and triggered this event. + deleted = old_data.keys() - new_data.keys() + # These are the keys that already existed in the databag, + # but had their values changed. 
+ changed = {key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key]} + # Convert the new_data to a serializable format and save it for a next diff check. + event.relation.data[bucket].update({"data": json.dumps(new_data)}) + + # Return the diff with all possible changes. + return Diff(added, changed, deleted) + + +# Base DataProvides and DataRequires + + +class DataProvides(Object, ABC): + """Base provides-side of the data products relation.""" + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + super().__init__(charm, relation_name) + self.charm = charm + self.local_app = self.charm.model.app + self.local_unit = self.charm.unit + self.relation_name = relation_name + self.framework.observe( + charm.on[relation_name].relation_changed, + self._on_relation_changed, + ) + + def _diff(self, event: RelationChangedEvent) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + return diff(event, self.local_app) + + @abstractmethod + def _on_relation_changed(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation data has changed.""" + raise NotImplementedError + + def fetch_relation_data(self) -> dict: + """Retrieves data from relation. + + This function can be used to retrieve data from a relation + in the charm code when outside an event callback. + + Returns: + a dict of the values stored in the relation data bag + for all relation instances (indexed by the relation id). + """ + data = {} + for relation in self.relations: + data[relation.id] = { + key: value for key, value in relation.data[relation.app].items() if key != "data" + } + return data + + def _update_relation_data(self, relation_id: int, data: dict) -> None: + """Updates a set of key-value pairs in the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + data: dict containing the key-value pairs + that should be updated in the relation. + """ + if self.local_unit.is_leader(): + relation = self.charm.model.get_relation(self.relation_name, relation_id) + relation.data[self.local_app].update(data) + + @property + def relations(self) -> List[Relation]: + """The list of Relation instances associated with this relation_name.""" + return list(self.charm.model.relations[self.relation_name]) + + def set_credentials(self, relation_id: int, username: str, password: str) -> None: + """Set credentials. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + username: user that was created. + password: password of the created user. + """ + self._update_relation_data( + relation_id, + { + "username": username, + "password": password, + }, + ) + + def set_tls(self, relation_id: int, tls: str) -> None: + """Set whether TLS is enabled. + + Args: + relation_id: the identifier for a particular relation. + tls: whether tls is enabled (True or False). + """ + self._update_relation_data(relation_id, {"tls": tls}) + + def set_tls_ca(self, relation_id: int, tls_ca: str) -> None: + """Set the TLS CA in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + tls_ca: TLS certification authority. 
+ """ + self._update_relation_data(relation_id, {"tls-ca": tls_ca}) + + +class DataRequires(Object, ABC): + """Requires-side of the relation.""" + + def __init__( + self, + charm, + relation_name: str, + extra_user_roles: str = None, + ): + """Manager of base client relations.""" + super().__init__(charm, relation_name) + self.charm = charm + self.extra_user_roles = extra_user_roles + self.local_app = self.charm.model.app + self.local_unit = self.charm.unit + self.relation_name = relation_name + self.framework.observe( + self.charm.on[relation_name].relation_joined, self._on_relation_joined_event + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, self._on_relation_changed_event + ) + + @abstractmethod + def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None: + """Event emitted when the application joins the relation.""" + raise NotImplementedError + + @abstractmethod + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + raise NotImplementedError + + def fetch_relation_data(self) -> dict: + """Retrieves data from relation. + + This function can be used to retrieve data from a relation + in the charm code when outside an event callback. + Function cannot be used in `*-relation-broken` events and will raise an exception. + + Returns: + a dict of the values stored in the relation data bag + for all relation instances (indexed by the relation ID). + """ + data = {} + for relation in self.relations: + data[relation.id] = { + key: value for key, value in relation.data[relation.app].items() if key != "data" + } + return data + + def _update_relation_data(self, relation_id: int, data: dict) -> None: + """Updates a set of key-value pairs in the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + data: dict containing the key-value pairs + that should be updated in the relation. + """ + if self.local_unit.is_leader(): + relation = self.charm.model.get_relation(self.relation_name, relation_id) + relation.data[self.local_app].update(data) + + def _diff(self, event: RelationChangedEvent) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + return diff(event, self.local_unit) + + @property + def relations(self) -> List[Relation]: + """The list of Relation instances associated with this relation_name.""" + return [ + relation + for relation in self.charm.model.relations[self.relation_name] + if self._is_relation_active(relation) + ] + + @staticmethod + def _is_relation_active(relation: Relation): + try: + _ = repr(relation.data) + return True + except RuntimeError: + return False + + @staticmethod + def _is_resource_created_for_relation(relation: Relation): + return ( + "username" in relation.data[relation.app] and "password" in relation.data[relation.app] + ) + + def is_resource_created(self, relation_id: Optional[int] = None) -> bool: + """Check if the resource has been created. + + This function can be used to check if the Provider answered with data in the charm code + when outside an event callback. 
+ + Args: + relation_id (int, optional): When provided the check is done only for the relation id + provided, otherwise the check is done for all relations + + Returns: + True or False + + Raises: + IndexError: If relation_id is provided but that relation does not exist + """ + if relation_id is not None: + try: + relation = [relation for relation in self.relations if relation.id == relation_id][ + 0 + ] + return self._is_resource_created_for_relation(relation) + except IndexError: + raise IndexError(f"relation id {relation_id} cannot be accessed") + else: + return ( + all( + [ + self._is_resource_created_for_relation(relation) + for relation in self.relations + ] + ) + if self.relations + else False + ) + + +# General events + + +class ExtraRoleEvent(RelationEvent): + """Base class for data events.""" + + @property + def extra_user_roles(self) -> Optional[str]: + """Returns the extra user roles that were requested.""" + return self.relation.data[self.relation.app].get("extra-user-roles") + + +class AuthenticationEvent(RelationEvent): + """Base class for authentication fields for events.""" + + @property + def username(self) -> Optional[str]: + """Returns the created username.""" + return self.relation.data[self.relation.app].get("username") + + @property + def password(self) -> Optional[str]: + """Returns the password for the created user.""" + return self.relation.data[self.relation.app].get("password") + + @property + def tls(self) -> Optional[str]: + """Returns whether TLS is configured.""" + return self.relation.data[self.relation.app].get("tls") + + @property + def tls_ca(self) -> Optional[str]: + """Returns TLS CA.""" + return self.relation.data[self.relation.app].get("tls-ca") + + +# Database related events and fields + + +class DatabaseProvidesEvent(RelationEvent): + """Base class for database events.""" + + @property + def database(self) -> Optional[str]: + """Returns the database that was requested.""" + return self.relation.data[self.relation.app].get("database") + + +class DatabaseRequestedEvent(DatabaseProvidesEvent, ExtraRoleEvent): + """Event emitted when a new database is requested for use on this relation.""" + + +class DatabaseProvidesEvents(CharmEvents): + """Database events. + + This class defines the events that the database can emit. + """ + + database_requested = EventSource(DatabaseRequestedEvent) + + +class DatabaseRequiresEvent(RelationEvent): + """Base class for database events.""" + + @property + def database(self) -> Optional[str]: + """Returns the database name.""" + return self.relation.data[self.relation.app].get("database") + + @property + def endpoints(self) -> Optional[str]: + """Returns a comma separated list of read/write endpoints. + + In VM charms, this is the primary's address. + In kubernetes charms, this is the service to the primary pod. + """ + return self.relation.data[self.relation.app].get("endpoints") + + @property + def read_only_endpoints(self) -> Optional[str]: + """Returns a comma separated list of read only endpoints. + + In VM charms, this is the address of all the secondary instances. + In kubernetes charms, this is the service to all replica pod instances. + """ + return self.relation.data[self.relation.app].get("read-only-endpoints") + + @property + def replset(self) -> Optional[str]: + """Returns the replicaset name. + + MongoDB only. + """ + return self.relation.data[self.relation.app].get("replset") + + @property + def uris(self) -> Optional[str]: + """Returns the connection URIs. + + MongoDB, Redis, OpenSearch. 
+ """ + return self.relation.data[self.relation.app].get("uris") + + @property + def version(self) -> Optional[str]: + """Returns the version of the database. + + Version as informed by the database daemon. + """ + return self.relation.data[self.relation.app].get("version") + + +class DatabaseCreatedEvent(AuthenticationEvent, DatabaseRequiresEvent): + """Event emitted when a new database is created for use on this relation.""" + + +class DatabaseEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent): + """Event emitted when the read/write endpoints are changed.""" + + +class DatabaseReadOnlyEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent): + """Event emitted when the read only endpoints are changed.""" + + +class DatabaseRequiresEvents(CharmEvents): + """Database events. + + This class defines the events that the database can emit. + """ + + database_created = EventSource(DatabaseCreatedEvent) + endpoints_changed = EventSource(DatabaseEndpointsChangedEvent) + read_only_endpoints_changed = EventSource(DatabaseReadOnlyEndpointsChangedEvent) + + +# Database Provider and Requires + + +class DatabaseProvides(DataProvides): + """Provider-side of the database relations.""" + + on = DatabaseProvidesEvents() + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + super().__init__(charm, relation_name) + + def _on_relation_changed(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Only the leader should handle this event. + if not self.local_unit.is_leader(): + return + + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Emit a database requested event if the setup key (database name and optional + # extra user roles) was added to the relation databag by the application. + if "database" in diff.added: + self.on.database_requested.emit(event.relation, app=event.app, unit=event.unit) + + def set_database(self, relation_id: int, database_name: str) -> None: + """Set database name. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + database_name: database name. + """ + self._update_relation_data(relation_id, {"database": database_name}) + + def set_endpoints(self, relation_id: int, connection_strings: str) -> None: + """Set database primary connections. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + In VM charms, only the primary's address should be passed as an endpoint. + In kubernetes charms, the service endpoint to the primary pod should be + passed as an endpoint. + + Args: + relation_id: the identifier for a particular relation. + connection_strings: database hosts and ports comma separated list. + """ + self._update_relation_data(relation_id, {"endpoints": connection_strings}) + + def set_read_only_endpoints(self, relation_id: int, connection_strings: str) -> None: + """Set database replicas connection strings. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + connection_strings: database hosts and ports comma separated list. + """ + self._update_relation_data(relation_id, {"read-only-endpoints": connection_strings}) + + def set_replset(self, relation_id: int, replset: str) -> None: + """Set replica set name in the application relation databag. + + MongoDB only. 
+ + Args: + relation_id: the identifier for a particular relation. + replset: replica set name. + """ + self._update_relation_data(relation_id, {"replset": replset}) + + def set_uris(self, relation_id: int, uris: str) -> None: + """Set the database connection URIs in the application relation databag. + + MongoDB, Redis, and OpenSearch only. + + Args: + relation_id: the identifier for a particular relation. + uris: connection URIs. + """ + self._update_relation_data(relation_id, {"uris": uris}) + + def set_version(self, relation_id: int, version: str) -> None: + """Set the database version in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + version: database version. + """ + self._update_relation_data(relation_id, {"version": version}) + + +class DatabaseRequires(DataRequires): + """Requires-side of the database relation.""" + + on = DatabaseRequiresEvents() + + def __init__( + self, + charm, + relation_name: str, + database_name: str, + extra_user_roles: str = None, + relations_aliases: List[str] = None, + ): + """Manager of database client relations.""" + super().__init__(charm, relation_name, extra_user_roles) + self.database = database_name + self.relations_aliases = relations_aliases + + # Define custom event names for each alias. + if relations_aliases: + # Ensure the number of aliases does not exceed the maximum + # of connections allowed in the specific relation. + relation_connection_limit = self.charm.meta.requires[relation_name].limit + if len(relations_aliases) != relation_connection_limit: + raise ValueError( + f"The number of aliases must match the maximum number of connections allowed in the relation. " + f"Expected {relation_connection_limit}, got {len(relations_aliases)}" + ) + + for relation_alias in relations_aliases: + self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent) + self.on.define_event( + f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent + ) + self.on.define_event( + f"{relation_alias}_read_only_endpoints_changed", + DatabaseReadOnlyEndpointsChangedEvent, + ) + + def _assign_relation_alias(self, relation_id: int) -> None: + """Assigns an alias to a relation. + + This function writes in the unit data bag. + + Args: + relation_id: the identifier for a particular relation. + """ + # If no aliases were provided, return immediately. + if not self.relations_aliases: + return + + # Return if an alias was already assigned to this relation + # (like when there are more than one unit joining the relation). + if ( + self.charm.model.get_relation(self.relation_name, relation_id) + .data[self.local_unit] + .get("alias") + ): + return + + # Retrieve the available aliases (the ones that weren't assigned to any relation). + available_aliases = self.relations_aliases[:] + for relation in self.charm.model.relations[self.relation_name]: + alias = relation.data[self.local_unit].get("alias") + if alias: + logger.debug("Alias %s was already assigned to relation %d", alias, relation.id) + available_aliases.remove(alias) + + # Set the alias in the unit relation databag of the specific relation. + relation = self.charm.model.get_relation(self.relation_name, relation_id) + relation.data[self.local_unit].update({"alias": available_aliases[0]}) + + def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None: + """Emit an aliased event to a particular relation if it has an alias. + + Args: + event: the relation changed event that was received. 
+ event_name: the name of the event to emit. + """ + alias = self._get_relation_alias(event.relation.id) + if alias: + getattr(self.on, f"{alias}_{event_name}").emit( + event.relation, app=event.app, unit=event.unit + ) + + def _get_relation_alias(self, relation_id: int) -> Optional[str]: + """Returns the relation alias. + + Args: + relation_id: the identifier for a particular relation. + + Returns: + the relation alias or None if the relation was not found. + """ + for relation in self.charm.model.relations[self.relation_name]: + if relation.id == relation_id: + return relation.data[self.local_unit].get("alias") + return None + + def is_postgresql_plugin_enabled(self, plugin: str, relation_index: int = 0) -> bool: + """Returns whether a plugin is enabled in the database. + + Args: + plugin: name of the plugin to check. + relation_index: optional relation index to check the database + (default: 0 - first relation). + + PostgreSQL only. + """ + # Psycopg 3 is imported locally to avoid the need of its package installation + # when relating to a database charm other than PostgreSQL. + import psycopg + + # Return False if no relation is established. + if len(self.relations) == 0: + return False + + relation_data = self.fetch_relation_data()[self.relations[relation_index].id] + host = relation_data.get("endpoints") + + # Return False if there is no endpoint available. + if host is None: + return False + + host = host.split(":")[0] + user = relation_data.get("username") + password = relation_data.get("password") + connection_string = ( + f"host='{host}' dbname='{self.database}' user='{user}' password='{password}'" + ) + try: + with psycopg.connect(connection_string) as connection: + with connection.cursor() as cursor: + cursor.execute(f"SELECT TRUE FROM pg_extension WHERE extname='{plugin}';") + return cursor.fetchone() is not None + except psycopg.Error as e: + logger.exception( + f"failed to check whether {plugin} plugin is enabled in the database: %s", str(e) + ) + return False + + def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None: + """Event emitted when the application joins the database relation.""" + # If relations aliases were provided, assign one to the relation. + self._assign_relation_alias(event.relation.id) + + # Sets both database and extra user roles in the relation + # if the roles are provided. Otherwise, sets only the database. + if self.extra_user_roles: + self._update_relation_data( + event.relation.id, + { + "database": self.database, + "extra-user-roles": self.extra_user_roles, + }, + ) + else: + self._update_relation_data(event.relation.id, {"database": self.database}) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the database relation has changed.""" + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Check if the database is created + # (the database charm shared the credentials). + if "username" in diff.added and "password" in diff.added: + # Emit the default event (the one without an alias). + logger.info("database created at %s", datetime.now()) + self.on.database_created.emit(event.relation, app=event.app, unit=event.unit) + + # Emit the aliased event (if any). + self._emit_aliased_event(event, "database_created") + + # To avoid unnecessary application restarts do not trigger + # “endpoints_changed“ event if “database_created“ is triggered. 
+ return + + # Emit an endpoints changed event if the database + # added or changed this info in the relation databag. + if "endpoints" in diff.added or "endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("endpoints changed on %s", datetime.now()) + self.on.endpoints_changed.emit(event.relation, app=event.app, unit=event.unit) + + # Emit the aliased event (if any). + self._emit_aliased_event(event, "endpoints_changed") + + # To avoid unnecessary application restarts do not trigger + # “read_only_endpoints_changed“ event if “endpoints_changed“ is triggered. + return + + # Emit a read only endpoints changed event if the database + # added or changed this info in the relation databag. + if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("read-only-endpoints changed on %s", datetime.now()) + self.on.read_only_endpoints_changed.emit( + event.relation, app=event.app, unit=event.unit + ) + + # Emit the aliased event (if any). + self._emit_aliased_event(event, "read_only_endpoints_changed") + + +# Kafka related events + + +class KafkaProvidesEvent(RelationEvent): + """Base class for Kafka events.""" + + @property + def topic(self) -> Optional[str]: + """Returns the topic that was requested.""" + return self.relation.data[self.relation.app].get("topic") + + @property + def consumer_group_prefix(self) -> Optional[str]: + """Returns the consumer-group-prefix that was requested.""" + return self.relation.data[self.relation.app].get("consumer-group-prefix") + + +class TopicRequestedEvent(KafkaProvidesEvent, ExtraRoleEvent): + """Event emitted when a new topic is requested for use on this relation.""" + + +class KafkaProvidesEvents(CharmEvents): + """Kafka events. + + This class defines the events that the Kafka can emit. + """ + + topic_requested = EventSource(TopicRequestedEvent) + + +class KafkaRequiresEvent(RelationEvent): + """Base class for Kafka events.""" + + @property + def topic(self) -> Optional[str]: + """Returns the topic.""" + return self.relation.data[self.relation.app].get("topic") + + @property + def bootstrap_server(self) -> Optional[str]: + """Returns a comma-separated list of broker uris.""" + return self.relation.data[self.relation.app].get("endpoints") + + @property + def consumer_group_prefix(self) -> Optional[str]: + """Returns the consumer-group-prefix.""" + return self.relation.data[self.relation.app].get("consumer-group-prefix") + + @property + def zookeeper_uris(self) -> Optional[str]: + """Returns a comma separated list of Zookeeper uris.""" + return self.relation.data[self.relation.app].get("zookeeper-uris") + + +class TopicCreatedEvent(AuthenticationEvent, KafkaRequiresEvent): + """Event emitted when a new topic is created for use on this relation.""" + + +class BootstrapServerChangedEvent(AuthenticationEvent, KafkaRequiresEvent): + """Event emitted when the bootstrap server is changed.""" + + +class KafkaRequiresEvents(CharmEvents): + """Kafka events. + + This class defines the events that the Kafka can emit. 
+ """ + + topic_created = EventSource(TopicCreatedEvent) + bootstrap_server_changed = EventSource(BootstrapServerChangedEvent) + + +# Kafka Provides and Requires + + +class KafkaProvides(DataProvides): + """Provider-side of the Kafka relation.""" + + on = KafkaProvidesEvents() + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + super().__init__(charm, relation_name) + + def _on_relation_changed(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Only the leader should handle this event. + if not self.local_unit.is_leader(): + return + + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Emit a topic requested event if the setup key (topic name and optional + # extra user roles) was added to the relation databag by the application. + if "topic" in diff.added: + self.on.topic_requested.emit(event.relation, app=event.app, unit=event.unit) + + def set_topic(self, relation_id: int, topic: str) -> None: + """Set topic name in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + topic: the topic name. + """ + self._update_relation_data(relation_id, {"topic": topic}) + + def set_bootstrap_server(self, relation_id: int, bootstrap_server: str) -> None: + """Set the bootstrap server in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + bootstrap_server: the bootstrap server address. + """ + self._update_relation_data(relation_id, {"endpoints": bootstrap_server}) + + def set_consumer_group_prefix(self, relation_id: int, consumer_group_prefix: str) -> None: + """Set the consumer group prefix in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + consumer_group_prefix: the consumer group prefix string. + """ + self._update_relation_data(relation_id, {"consumer-group-prefix": consumer_group_prefix}) + + def set_zookeeper_uris(self, relation_id: int, zookeeper_uris: str) -> None: + """Set the zookeeper uris in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + zookeeper_uris: comma-separated list of ZooKeeper server uris. 
+ """ + self._update_relation_data(relation_id, {"zookeeper-uris": zookeeper_uris}) + + +class KafkaRequires(DataRequires): + """Requires-side of the Kafka relation.""" + + on = KafkaRequiresEvents() + + def __init__( + self, + charm, + relation_name: str, + topic: str, + extra_user_roles: Optional[str] = None, + consumer_group_prefix: Optional[str] = None, + ): + """Manager of Kafka client relations.""" + # super().__init__(charm, relation_name) + super().__init__(charm, relation_name, extra_user_roles) + self.charm = charm + self.topic = topic + self.consumer_group_prefix = consumer_group_prefix or "" + + @property + def topic(self): + """Topic to use in Kafka.""" + return self._topic + + @topic.setter + def topic(self, value): + # Avoid wildcards + if value == "*": + raise ValueError(f"Error on topic '{value}', cannot be a wildcard.") + self._topic = value + + def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None: + """Event emitted when the application joins the Kafka relation.""" + # Sets topic, extra user roles, and "consumer-group-prefix" in the relation + relation_data = { + f: getattr(self, f.replace("-", "_"), "") + for f in ["consumer-group-prefix", "extra-user-roles", "topic"] + } + + self._update_relation_data(event.relation.id, relation_data) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the Kafka relation has changed.""" + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Check if the topic is created + # (the Kafka charm shared the credentials). + if "username" in diff.added and "password" in diff.added: + # Emit the default event (the one without an alias). + logger.info("topic created at %s", datetime.now()) + self.on.topic_created.emit(event.relation, app=event.app, unit=event.unit) + + # To avoid unnecessary application restarts do not trigger + # “endpoints_changed“ event if “topic_created“ is triggered. + return + + # Emit an endpoints (bootstrap-server) changed event if the Kafka endpoints + # added or changed this info in the relation databag. + if "endpoints" in diff.added or "endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("endpoints changed on %s", datetime.now()) + self.on.bootstrap_server_changed.emit( + event.relation, app=event.app, unit=event.unit + ) # here check if this is the right design + return + + +# Opensearch related events + + +class OpenSearchProvidesEvent(RelationEvent): + """Base class for OpenSearch events.""" + + @property + def index(self) -> Optional[str]: + """Returns the index that was requested.""" + return self.relation.data[self.relation.app].get("index") + + +class IndexRequestedEvent(OpenSearchProvidesEvent, ExtraRoleEvent): + """Event emitted when a new index is requested for use on this relation.""" + + +class OpenSearchProvidesEvents(CharmEvents): + """OpenSearch events. + + This class defines the events that OpenSearch can emit. + """ + + index_requested = EventSource(IndexRequestedEvent) + + +class OpenSearchRequiresEvent(DatabaseRequiresEvent): + """Base class for OpenSearch requirer events.""" + + +class IndexCreatedEvent(AuthenticationEvent, OpenSearchRequiresEvent): + """Event emitted when a new index is created for use on this relation.""" + + +class OpenSearchRequiresEvents(CharmEvents): + """OpenSearch events. + + This class defines the events that the opensearch requirer can emit. 
+ """ + + index_created = EventSource(IndexCreatedEvent) + endpoints_changed = EventSource(DatabaseEndpointsChangedEvent) + authentication_updated = EventSource(AuthenticationEvent) + + +# OpenSearch Provides and Requires Objects + + +class OpenSearchProvides(DataProvides): + """Provider-side of the OpenSearch relation.""" + + on = OpenSearchProvidesEvents() + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + super().__init__(charm, relation_name) + + def _on_relation_changed(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Only the leader should handle this event. + if not self.local_unit.is_leader(): + return + + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Emit an index requested event if the setup key (index name and optional extra user roles) + # have been added to the relation databag by the application. + if "index" in diff.added: + self.on.index_requested.emit(event.relation, app=event.app, unit=event.unit) + + def set_index(self, relation_id: int, index: str) -> None: + """Set the index in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + index: the index as it is _created_ on the provider charm. This needn't match the + requested index, and can be used to present a different index name if, for example, + the requested index is invalid. + """ + self._update_relation_data(relation_id, {"index": index}) + + def set_endpoints(self, relation_id: int, endpoints: str) -> None: + """Set the endpoints in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + endpoints: the endpoint addresses for opensearch nodes. + """ + self._update_relation_data(relation_id, {"endpoints": endpoints}) + + def set_version(self, relation_id: int, version: str) -> None: + """Set the opensearch version in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + version: database version. + """ + self._update_relation_data(relation_id, {"version": version}) + + +class OpenSearchRequires(DataRequires): + """Requires-side of the OpenSearch relation.""" + + on = OpenSearchRequiresEvents() + + def __init__( + self, charm, relation_name: str, index: str, extra_user_roles: Optional[str] = None + ): + """Manager of OpenSearch client relations.""" + super().__init__(charm, relation_name, extra_user_roles) + self.charm = charm + self.index = index + + def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None: + """Event emitted when the application joins the OpenSearch relation.""" + # Sets both index and extra user roles in the relation if the roles are provided. + # Otherwise, sets only the index. + data = {"index": self.index} + if self.extra_user_roles: + data["extra-user-roles"] = self.extra_user_roles + + self._update_relation_data(event.relation.id, data) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the OpenSearch relation has changed. + + This event triggers individual custom events depending on the changing relation. + """ + # Check which data has changed to emit customs events. 
+ diff = self._diff(event) + + # Check if authentication has updated, emit event if so + updates = {"username", "password", "tls", "tls-ca"} + if len(set(diff._asdict().keys()) - updates) < len(diff): + logger.info("authentication updated at: %s", datetime.now()) + self.on.authentication_updated.emit(event.relation, app=event.app, unit=event.unit) + + # Check if the index is created + # (the OpenSearch charm shares the credentials). + if "username" in diff.added and "password" in diff.added: + # Emit the default event (the one without an alias). + logger.info("index created at: %s", datetime.now()) + self.on.index_created.emit(event.relation, app=event.app, unit=event.unit) + + # To avoid unnecessary application restarts do not trigger + # “endpoints_changed“ event if “index_created“ is triggered. + return + + # Emit a endpoints changed event if the OpenSearch application added or changed this info + # in the relation databag. + if "endpoints" in diff.added or "endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("endpoints changed on %s", datetime.now()) + self.on.endpoints_changed.emit( + event.relation, app=event.app, unit=event.unit + ) # here check if this is the right design + return diff --git a/metadata.yaml b/metadata.yaml index 53797d50..97ac6dde 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -38,6 +38,10 @@ requires: interface: ingress limit: 1 optional: true + database: + interface: postgresql_client + limit: 1 + optional: true nginx-route: interface: nginx-route limit: 1 diff --git a/requirements.txt b/requirements.txt index fb23a0d3..ba67c2ed 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,5 @@ jsonschema == 4.17.3 ops >= 2.2.0 -pydantic==1.10.10 +pydantic == 1.10.10 +ops-lib-pgsql >= 1.4 +psycopg2-binary == 2.9.6 diff --git a/src/charm.py b/src/charm.py index 1c5b4a22..c730ab98 100755 --- a/src/charm.py +++ b/src/charm.py @@ -9,21 +9,18 @@ import typing import ops +import psycopg2 from charms.nginx_ingress_integrator.v0.nginx_route import require_nginx_route from charms.traefik_k8s.v1.ingress import IngressPerAppRequirer from ops.charm import ActionEvent from ops.main import main -from charm_state import CharmState -from constants import ( - CHECK_READY_NAME, - SYNAPSE_COMMAND_PATH, - SYNAPSE_CONTAINER_NAME, - SYNAPSE_PORT, - SYNAPSE_SERVICE_NAME, -) -from exceptions import CharmConfigInvalidError, CommandMigrateConfigError, ServerNameModifiedError -from synapse import Synapse +from charm_state import CharmConfigInvalidError, CharmState +from constants import SYNAPSE_CONTAINER_NAME, SYNAPSE_PORT +from database_client import DatabaseClient +from database_observer import DatabaseObserver +from pebble import PebbleService +from synapse import CommandMigrateConfigError, ServerNameModifiedError, Synapse logger = logging.getLogger(__name__) @@ -38,12 +35,14 @@ def __init__(self, *args: typing.Any) -> None: args: class arguments. """ super().__init__(*args) + self.database = DatabaseObserver(self) try: self._charm_state = CharmState.from_charm(charm=self) except CharmConfigInvalidError as exc: self.model.unit.status = ops.BlockedStatus(exc.msg) return self._synapse = Synapse(charm_state=self._charm_state) + self.pebble_service = PebbleService(synapse=self._synapse) # service-hostname is a required field so we're hardcoding to the same # value as service-name. service-hostname should be set via Nginx # Ingress Integrator charm config. 
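
Editor's note: the hunks that follow delegate all Pebble layer handling from charm.py to a new `PebbleService` (src/pebble.py, added by this patch but not included in this excerpt). Below is a minimal sketch of what that service could look like; it is an assumption, not the actual file, built only from the `_pebble_layer` definition removed from charm.py in the following hunk and from the call sites visible in the diff (`change_config`, `replan`, `reset_instance`, `PebbleService(synapse=...)`). It also assumes the layer constants stay in `constants.py`.

```python
# Hypothetical sketch of src/pebble.py; the real implementation may differ.
import typing

import ops

from constants import (  # assumed to still live in constants.py
    CHECK_READY_NAME,
    SYNAPSE_COMMAND_PATH,
    SYNAPSE_CONTAINER_NAME,
    SYNAPSE_SERVICE_NAME,
)
from synapse import Synapse


class PebbleService:
    """Manage the Synapse Pebble layer on behalf of the charm."""

    def __init__(self, synapse: Synapse) -> None:
        self._synapse = synapse

    @property
    def _layer(self) -> ops.pebble.LayerDict:
        """Pebble layer for Synapse (mirrors the layer removed from charm.py)."""
        layer = {
            "summary": "Synapse layer",
            "description": "pebble config layer for Synapse",
            "services": {
                SYNAPSE_SERVICE_NAME: {
                    "override": "replace",
                    "summary": "Synapse application service",
                    "startup": "enabled",
                    "command": SYNAPSE_COMMAND_PATH,
                    "environment": self._synapse.synapse_environment(),
                }
            },
            "checks": {CHECK_READY_NAME: self._synapse.check_ready()},
        }
        return typing.cast(ops.pebble.LayerDict, layer)

    def replan(self, container: ops.Container) -> None:
        """(Re)apply the layer and restart the service if needed."""
        container.add_layer(SYNAPSE_CONTAINER_NAME, self._layer, combine=True)
        container.replan()

    def change_config(self, container: ops.Container) -> None:
        """Regenerate the Synapse configuration, then replan."""
        self._synapse.execute_migrate_config(container)
        self.replan(container)

    def reset_instance(self, container: ops.Container) -> None:
        """Delegate instance reset to the Synapse helper."""
        self._synapse.reset_instance(container)
```

Centralising the layer in one service is what lets both the charm and the new `DatabaseObserver` replan Synapse without duplicating the layer definition.
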
@@ -66,20 +65,15 @@ def __init__(self, *args: typing.Any) -> None: self.framework.observe(self.on.reset_instance_action, self._on_reset_instance_action) self.framework.observe(self.on.synapse_pebble_ready, self._on_pebble_ready) - def _change_config(self, event: ops.HookEvent) -> None: - """Change the configuration. - - Args: - event: Event triggering the need of changing the configuration. - """ + def change_config(self, _: ops.HookEvent) -> None: + """Change configuration.""" container = self.unit.get_container(SYNAPSE_CONTAINER_NAME) if not container.can_connect(): - event.defer() - self.unit.status = ops.WaitingStatus("Waiting for pebble") + self.unit.status = ops.MaintenanceStatus("Waiting for pebble") return + self.model.unit.status = ops.MaintenanceStatus("Configuring Synapse") try: - self.model.unit.status = ops.MaintenanceStatus("Configuring Synapse") - self._synapse.execute_migrate_config(container) + self.pebble_service.change_config(container) except ( CharmConfigInvalidError, CommandMigrateConfigError, @@ -88,9 +82,7 @@ def _change_config(self, event: ops.HookEvent) -> None: ) as exc: self.model.unit.status = ops.BlockedStatus(str(exc)) return - container.add_layer(SYNAPSE_CONTAINER_NAME, self._pebble_layer, combine=True) - container.replan() - self.unit.status = ops.ActiveStatus() + self.model.unit.status = ops.ActiveStatus() def _on_config_changed(self, event: ops.HookEvent) -> None: """Handle changed configuration. @@ -98,7 +90,7 @@ def _on_config_changed(self, event: ops.HookEvent) -> None: Args: event: Event triggering after config is changed. """ - self._change_config(event) + self.change_config(event) def _on_pebble_ready(self, event: ops.HookEvent) -> None: """Handle pebble ready event. @@ -106,28 +98,7 @@ def _on_pebble_ready(self, event: ops.HookEvent) -> None: Args: event: Event triggering after pebble is ready. """ - self._change_config(event) - - @property - def _pebble_layer(self) -> ops.pebble.LayerDict: - """Return a dictionary representing a Pebble layer.""" - layer = { - "summary": "Synapse layer", - "description": "pebble config layer for Synapse", - "services": { - SYNAPSE_SERVICE_NAME: { - "override": "replace", - "summary": "Synapse application service", - "startup": "enabled", - "command": SYNAPSE_COMMAND_PATH, - "environment": self._synapse.synapse_environment(), - } - }, - "checks": { - CHECK_READY_NAME: self._synapse.check_ready(), - }, - } - return typing.cast(ops.pebble.LayerDict, layer) + self.change_config(event) def _on_reset_instance_action(self, event: ActionEvent) -> None: """Reset instance and report action result. @@ -145,18 +116,27 @@ def _on_reset_instance_action(self, event: ActionEvent) -> None: if not container.can_connect(): event.fail("Failed to connect to container") return - self.model.unit.status = ops.MaintenanceStatus("Resetting Synapse instance") try: - self._synapse.reset_instance(container) + self.model.unit.status = ops.MaintenanceStatus("Resetting Synapse instance") + self.pebble_service.reset_instance(container) + datasource = self.database.get_relation_as_datasource() + if datasource is not None: + logger.info("Erase Synapse database") + # Connecting to template1 to make it possible to erase the database. + # Otherwise PostgreSQL will prevent it if there are open connections. 
+ db_client = DatabaseClient(datasource=datasource, alternative_database="template1") + db_client.erase() self._synapse.execute_migrate_config(container) + logger.info("Start Synapse database") + self.pebble_service.replan(container) results["reset-instance"] = True - except (ops.pebble.PathError, CommandMigrateConfigError) as exc: + except (psycopg2.Error, ops.pebble.PathError, CommandMigrateConfigError) as exc: self.model.unit.status = ops.BlockedStatus(str(exc)) event.fail(str(exc)) return - self.model.unit.status = ops.ActiveStatus() # results is a dict and set_results expects _SerializedData event.set_results(results) # type: ignore[arg-type] + self.model.unit.status = ops.ActiveStatus() if __name__ == "__main__": # pragma: nocover diff --git a/src/charm_state.py b/src/charm_state.py index fd4a4cdf..33c12413 100644 --- a/src/charm_state.py +++ b/src/charm_state.py @@ -16,7 +16,7 @@ validator, ) -from exceptions import CharmConfigInvalidError +from charm_types import DatasourcePostgreSQL if typing.TYPE_CHECKING: from charm import SynapseCharm @@ -28,6 +28,22 @@ ) +class CharmConfigInvalidError(Exception): + """Exception raised when a charm configuration is found to be invalid. + + Attrs: + msg (str): Explanation of the error. + """ + + def __init__(self, msg: str): + """Initialize a new instance of the CharmConfigInvalidError exception. + + Args: + msg (str): Explanation of the error. + """ + self.msg = msg + + class SynapseConfig(BaseModel): # pylint: disable=too-few-public-methods """Represent Synapse builtin configuration values. @@ -70,19 +86,20 @@ class CharmState: Attrs: server_name: server_name config. report_stats: report_stats config. + datasource: datasource information. """ def __init__( - self, - *, - synapse_config: SynapseConfig, + self, *, synapse_config: SynapseConfig, datasource: typing.Optional[DatasourcePostgreSQL] ) -> None: """Construct. Args: synapse_config: The value of the synapse_config charm configuration. + datasource: Datasource information. """ self._synapse_config = synapse_config + self._datasource = datasource @property def server_name(self) -> typing.Optional[str]: @@ -102,6 +119,15 @@ def report_stats(self) -> typing.Union[str, bool, None]: """ return self._synapse_config.report_stats + @property + def datasource(self) -> typing.Union[DatasourcePostgreSQL, None]: + """Return datasource. + + Returns: + datasource or None. + """ + return self._datasource + @classmethod def from_charm(cls, charm: "SynapseCharm") -> "CharmState": """Initialize a new instance of the CharmState class from the associated charm. @@ -124,4 +150,7 @@ def from_charm(cls, charm: "SynapseCharm") -> "CharmState": ) error_field_str = " ".join(f"{f}" for f in error_fields) raise CharmConfigInvalidError(f"invalid configuration: {error_field_str}") from exc - return cls(synapse_config=valid_synapse_config) + return cls( + synapse_config=valid_synapse_config, + datasource=charm.database.get_relation_as_datasource(), + ) diff --git a/src/charm_types.py b/src/charm_types.py index 1fed5f2a..11b2cf9d 100644 --- a/src/charm_types.py +++ b/src/charm_types.py @@ -8,15 +8,19 @@ import typing -class ExecResult(typing.NamedTuple): - """A named tuple representing the result of executing a command. +class DatasourcePostgreSQL(typing.TypedDict): + """A named tuple representing a Datasource PostgreSQL. Attributes: - exit_code: The exit status of the command (0 for success, non-zero for failure). - stdout: The standard output of the command as a string. 
-        stderr: The standard error output of the command as a string.
+        user: User.
+        password: Password.
+        host: Host (IP or DNS without port or protocol).
+        port: Port.
+        db: Database name.
     """
 
-    exit_code: int
-    stdout: str
-    stderr: str
+    user: str
+    password: str
+    host: str
+    port: str
+    db: str
diff --git a/src/database_client.py b/src/database_client.py
new file mode 100644
index 00000000..48ca18ab
--- /dev/null
+++ b/src/database_client.py
@@ -0,0 +1,117 @@
+# Copyright 2023 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""The DatabaseClient class."""
+import logging
+import typing
+
+import psycopg2
+from psycopg2 import sql
+from psycopg2.extensions import connection
+
+from charm_types import DatasourcePostgreSQL
+from exceptions import CharmDatabaseRelationNotFoundError
+
+logger = logging.getLogger(__name__)
+
+
+class DatabaseClient:
+    """A client for managing the Synapse PostgreSQL database."""
+
+    def __init__(
+        self, datasource: typing.Optional[DatasourcePostgreSQL], alternative_database: str = ""
+    ):
+        """Initialize a new instance of the DatabaseClient class.
+
+        Args:
+            datasource: datasource to use to connect.
+            alternative_database: database to connect to.
+                The default is to use the one provided by the datasource.
+
+        Raises:
+            CharmDatabaseRelationNotFoundError: if there is no relation.
+        """
+        if datasource is None:
+            raise CharmDatabaseRelationNotFoundError("No database relation was found.")
+        self._datasource = datasource
+        self._database_name = datasource["db"]
+        self._alternative_database = alternative_database
+        self._conn: connection = None
+
+    def _connect(self) -> None:
+        """Get a database connection, creating one if needed.
+
+        Raises:
+            Error: something went wrong while connecting to the database.
+        """
+        if self._conn is None or self._conn.closed != 0:
+            logger.debug("Connecting to database")
+            try:
+                user = self._datasource["user"]
+                password = self._datasource["password"]
+                host = self._datasource["host"]
+                database_name = (
+                    self._alternative_database
+                    if self._alternative_database
+                    else self._datasource["db"]
+                )
+                self._conn = psycopg2.connect(
+                    f"dbname='{database_name}' user='{user}' host='{host}'"
+                    f" password='{password}' connect_timeout=5"
+                )
+                self._conn.autocommit = True
+            except psycopg2.Error as exc:
+                logger.exception("Failed to connect to database: %s", str(exc))
+                raise
+
+    def _close(self) -> None:
+        """Close the database connection."""
+        if self._conn is not None:
+            self._conn.close()
+            self._conn = None
+
+    def prepare(self) -> None:
+        """Change database collate and ctype as required by Synapse.
+
+        Raises:
+            Error: something went wrong while preparing the database.
+        """
+        try:
+            self._connect()
+            with self._conn.cursor() as curs:
+                curs.execute(
+                    sql.SQL(
+                        "UPDATE pg_database SET datcollate='C', datctype='C' WHERE datname = {}"
+                    ).format(sql.Literal(self._database_name))
+                )
+        except psycopg2.Error as exc:
+            logger.error("Failed to prepare database: %s", str(exc))
+            raise
+        finally:
+            self._close()
+
+    def erase(self) -> None:
+        """Erase the database.
+
+        Raises:
+            Error: something went wrong while erasing the database.
+        """
+        # Since it is not possible to drop the database while connected to it,
+        # this connection uses the template1 database provided by PostgreSQL.
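+        # The database is then recreated from template0 with LC_COLLATE and
+        # LC_CTYPE set to 'C', matching what prepare() enforces for Synapse.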
+ try: + self._connect() + with self._conn.cursor() as curs: + curs.execute( + sql.SQL("DROP DATABASE {}").format(sql.Identifier(self._database_name)) + ) + curs.execute( + sql.SQL( + "CREATE DATABASE {} " + "WITH LC_CTYPE = 'C' LC_COLLATE='C' TEMPLATE='template0';" + ).format(sql.Identifier(self._database_name)) + ) + except psycopg2.Error as exc: + logger.error("Failed to erase database: %s", str(exc)) + raise + finally: + self._close() diff --git a/src/database_observer.py b/src/database_observer.py new file mode 100644 index 00000000..8191eb3c --- /dev/null +++ b/src/database_observer.py @@ -0,0 +1,132 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +"""The Database agent relation observer.""" +import logging +import typing + +import ops +from charms.data_platform_libs.v0.data_interfaces import ( + DatabaseCreatedEvent, + DatabaseEndpointsChangedEvent, + DatabaseRequires, +) +from ops.charm import CharmBase +from ops.framework import Object + +from charm_types import DatasourcePostgreSQL +from constants import SYNAPSE_CONTAINER_NAME +from database_client import DatabaseClient +from exceptions import CharmDatabaseRelationNotFoundError + +logger = logging.getLogger(__name__) + + +class DatabaseObserver(Object): + """The Database relation observer. + + Attrs: + _pebble_service: instance of pebble service. + """ + + _RELATION_NAME = "database" + + def __init__(self, charm: CharmBase): + """Initialize the observer and register event handlers. + + Args: + charm: The parent charm to attach the observer to. + """ + super().__init__(charm, "database-observer") + self._charm = charm + # SUPERUSER is required to update pg_database + self.database = DatabaseRequires( + self._charm, + relation_name=self._RELATION_NAME, + database_name=self._charm.app.name, + extra_user_roles="SUPERUSER", + ) + self.framework.observe(self.database.on.database_created, self._on_database_created) + self.framework.observe(self.database.on.endpoints_changed, self._on_endpoints_changed) + + @property + def _pebble_service(self) -> typing.Any: + """Return instance of pebble service. + + Returns: + instance of pebble service or none. + """ + return getattr(self._charm, "pebble_service", None) + + def _change_config(self, _: ops.HookEvent) -> None: + """Change the configuration.""" + container = self._charm.unit.get_container(SYNAPSE_CONTAINER_NAME) + if not container.can_connect() or self._pebble_service is None: + self._charm.unit.status = ops.MaintenanceStatus("Waiting for pebble") + return + try: + self._pebble_service.change_config(container) + # Avoiding duplication of code with _change_config in charm.py + except Exception as exc: # pylint: disable=broad-exception-caught + self._charm.model.unit.status = ops.BlockedStatus(f"Database failed: {exc}") + return + self._charm.unit.status = ops.ActiveStatus() + + def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + """Handle database created. + + Args: + event: Event triggering the database created handler. + """ + self.model.unit.status = ops.MaintenanceStatus("Preparing the database") + # In case of psycopg2.Error, Juju will set ErrorStatus + # See discussion here: + # https://github.com/canonical/synapse-operator/pull/13#discussion_r1253285244 + datasource = self.get_relation_as_datasource() + db_client = DatabaseClient(datasource=datasource) + db_client.prepare() + self._change_config(event) + + def _on_endpoints_changed(self, event: DatabaseEndpointsChangedEvent) -> None: + """Handle endpoints change. 
+ + Args: + event: Event triggering the endpoints changed handler. + """ + self._change_config(event) + + def get_relation_as_datasource(self) -> typing.Optional[DatasourcePostgreSQL]: + """Get database data from relation. + + Returns: + Dict: Information needed for setting environment variables. + """ + if self.model.get_relation(self._RELATION_NAME) is None: + return None + + relation_id = self.database.relations[0].id + relation_data = self.database.fetch_relation_data()[relation_id] + + endpoint = relation_data.get("endpoints", ":") + + return DatasourcePostgreSQL( + user=relation_data.get("username", ""), + password=relation_data.get("password", ""), + host=endpoint.split(":")[0], + port=endpoint.split(":")[1], + db=self._charm.app.name, + ) + + def get_database_name(self) -> str: + """Get database name. + + Raises: + CharmDatabaseRelationNotFoundError: if there is no relation. + + Returns: + str: database name. + """ + datasource = self.get_relation_as_datasource() + if datasource is None: + raise CharmDatabaseRelationNotFoundError("No database relation was found.") + return datasource["db"] diff --git a/src/exceptions.py b/src/exceptions.py index bcea4e1c..eb21e8e9 100644 --- a/src/exceptions.py +++ b/src/exceptions.py @@ -6,49 +6,15 @@ """Exceptions used by the Synapse charm.""" -class CommandMigrateConfigError(Exception): - """Exception raised when a charm configuration is found to be invalid. +class CharmDatabaseRelationNotFoundError(Exception): + """Exception raised when there is no database relation. Attrs: msg (str): Explanation of the error. """ def __init__(self, msg: str): - """Initialize a new instance of the CommandMigrateConfigError exception. - - Args: - msg (str): Explanation of the error. - """ - self.msg = msg - - -class CharmConfigInvalidError(Exception): - """Exception raised when a charm configuration is found to be invalid. - - Attrs: - msg (str): Explanation of the error. - """ - - def __init__(self, msg: str): - """Initialize a new instance of the CharmConfigInvalidError exception. - - Args: - msg (str): Explanation of the error. - """ - self.msg = msg - - -class ServerNameModifiedError(Exception): - """Exception raised while checking configuration file. - - Raised if server_name from state is different than the one in the configuration file. - - Attrs: - msg (str): Explanation of the error. - """ - - def __init__(self, msg: str): - """Initialize a new instance of the ServerNameModifiedError exception. + """Initialize a new instance of the CharmDatabaseRelationNotFoundError exception. Args: msg (str): Explanation of the error. diff --git a/src/pebble.py b/src/pebble.py new file mode 100644 index 00000000..3f516f37 --- /dev/null +++ b/src/pebble.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python3 + +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Class to interact with pebble.""" + +import logging +import typing + +import ops + +from constants import ( + CHECK_READY_NAME, + SYNAPSE_COMMAND_PATH, + SYNAPSE_CONTAINER_NAME, + SYNAPSE_SERVICE_NAME, +) +from synapse import Synapse + +logger = logging.getLogger(__name__) + + +class PebbleService: + """The charm pebble service manager.""" + + def __init__(self, synapse: Synapse): + """Initialize the pebble service. + + Args: + synapse: Instance to interact with Synapse. + """ + self._synapse = synapse + + def replan(self, container: ops.model.Container) -> None: + """Replan the pebble service. + + Args: + container: Charm container. 
+ """ + container.add_layer(SYNAPSE_CONTAINER_NAME, self._pebble_layer, combine=True) + container.replan() + + def change_config(self, container: ops.model.Container) -> None: + """Change the configuration. + + Args: + container: Charm container. + """ + self._synapse.execute_migrate_config(container) + self.replan(container) + + def reset_instance(self, container: ops.model.Container) -> None: + """Reset instance. + + Args: + container: Charm container. + """ + # This is needed in the case of relation with Postgresql. + # If there is open connections it won't be possible to drop the database. + logger.info("Replan service to not restart") + container.add_layer( + SYNAPSE_CONTAINER_NAME, self._pebble_layer_without_restart, combine=True + ) + container.replan() + logger.info("Stop Synapse instance") + container.stop(SYNAPSE_SERVICE_NAME) + logger.info("Erase Synapse data") + self._synapse.reset_instance(container) + + @property + def _pebble_layer(self) -> ops.pebble.LayerDict: + """Return a dictionary representing a Pebble layer.""" + layer = { + "summary": "Synapse layer", + "description": "pebble config layer for Synapse", + "services": { + SYNAPSE_SERVICE_NAME: { + "override": "replace", + "summary": "Synapse application service", + "startup": "enabled", + "command": SYNAPSE_COMMAND_PATH, + "environment": self._synapse.synapse_environment(), + } + }, + "checks": { + CHECK_READY_NAME: self._synapse.check_ready(), + }, + } + return typing.cast(ops.pebble.LayerDict, layer) + + @property + def _pebble_layer_without_restart(self) -> ops.pebble.LayerDict: + """Return a dictionary representing a Pebble layer without restart.""" + new_layer = self._pebble_layer + new_layer["services"][SYNAPSE_SERVICE_NAME]["on-success"] = "ignore" + new_layer["services"][SYNAPSE_SERVICE_NAME]["on-failure"] = "ignore" + ignore = {CHECK_READY_NAME: "ignore"} + new_layer["services"][SYNAPSE_SERVICE_NAME]["on-check-failure"] = ignore + return new_layer diff --git a/src/synapse.py b/src/synapse.py index 551e948c..5688aadc 100644 --- a/src/synapse.py +++ b/src/synapse.py @@ -13,7 +13,6 @@ from ops.pebble import Check, ExecError, PathError from charm_state import CharmState -from charm_types import ExecResult from constants import ( CHECK_READY_NAME, COMMAND_MIGRATE_CONFIG, @@ -22,11 +21,58 @@ SYNAPSE_CONFIG_PATH, SYNAPSE_PORT, ) -from exceptions import CommandMigrateConfigError, ServerNameModifiedError logger = logging.getLogger(__name__) +class CommandMigrateConfigError(Exception): + """Exception raised when a charm configuration is found to be invalid. + + Attrs: + msg (str): Explanation of the error. + """ + + def __init__(self, msg: str): + """Initialize a new instance of the CommandMigrateConfigError exception. + + Args: + msg (str): Explanation of the error. + """ + self.msg = msg + + +class ServerNameModifiedError(Exception): + """Exception raised while checking configuration file. + + Raised if server_name from state is different than the one in the configuration file. + + Attrs: + msg (str): Explanation of the error. + """ + + def __init__(self, msg: str): + """Initialize a new instance of the ServerNameModifiedError exception. + + Args: + msg (str): Explanation of the error. + """ + self.msg = msg + + +class ExecResult(typing.NamedTuple): + """A named tuple representing the result of executing a command. + + Attributes: + exit_code: The exit status of the command (0 for success, non-zero for failure). + stdout: The standard output of the command as a string. 
+ stderr: The standard error output of the command as a string. + """ + + exit_code: int + stdout: str + stderr: str + + class Synapse: """A class representing the Synapse application.""" @@ -57,13 +103,21 @@ def synapse_environment(self) -> typing.Dict[str, str]: Returns: A dictionary representing the Synapse environment variables. """ - return { + environment = { "SYNAPSE_SERVER_NAME": f"{self._charm_state.server_name}", "SYNAPSE_REPORT_STATS": f"{self._charm_state.report_stats}", # TLS disabled so the listener is HTTP. HTTPS will be handled by Traefik. # TODO verify support to HTTPS backend before changing this # pylint: disable=fixme "SYNAPSE_NO_TLS": str(True), } + datasource = self._charm_state.datasource + if datasource is not None: + environment["POSTGRES_DB"] = datasource["db"] + environment["POSTGRES_HOST"] = datasource["host"] + environment["POSTGRES_PORT"] = datasource["port"] + environment["POSTGRES_USER"] = datasource["user"] + environment["POSTGRES_PASSWORD"] = datasource["password"] + return environment def execute_migrate_config(self, container: ops.Container) -> None: """Run the Synapse command migrate_config. @@ -116,8 +170,9 @@ def check_server_name(self, container: ops.Container) -> None: ): msg = ( f"server_name {self._charm_state.server_name} is different from the existing " - f" one {configured_server_name}. Please revert the config or run the action " - "reset-instance if you to erase the existing instance and start a new one." + f"one {configured_server_name}. Please revert the config or run the action " + "reset-instance if you want to erase the existing instance and start a new " + "one." ) logger.error(msg) raise ServerNameModifiedError( diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 94fec330..11561645 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -13,6 +13,9 @@ from pytest import Config from pytest_operator.plugin import OpsTest +# caused by pytest fixtures, mark does not work in fixtures +# pylint: disable=too-many-arguments, unused-argument + @pytest_asyncio.fixture(scope="module", name="server_name") async def server_name_fixture() -> str: @@ -56,11 +59,14 @@ def synapse_app_name_fixture() -> str: @pytest_asyncio.fixture(scope="module", name="synapse_app") async def synapse_app_fixture( + ops_test: OpsTest, synapse_app_name: str, synapse_image: str, model: Model, server_name: str, synapse_charm: str, + postgresql_app: Application, + postgresql_app_name: str, ): """Build and deploy the Synapse charm.""" resources = { @@ -73,7 +79,10 @@ async def synapse_app_fixture( series="jammy", config={"server_name": server_name}, ) - await model.wait_for_idle(raise_on_blocked=True) + async with ops_test.fast_forward(): + await model.wait_for_idle(raise_on_blocked=True) + await model.relate(f"{synapse_app_name}:database", f"{postgresql_app_name}") + await model.wait_for_idle(wait_for_active=True) return app @@ -116,7 +125,7 @@ def traefik_app_name_fixture() -> str: @pytest_asyncio.fixture(scope="module", name="traefik_app") async def traefik_app_fixture( model: Model, - synapse_app, # pylint: disable=unused-argument + synapse_app, traefik_app_name: str, external_hostname: str, ): @@ -143,7 +152,7 @@ def nginx_integrator_app_name_fixture() -> str: @pytest_asyncio.fixture(scope="module", name="nginx_integrator_app") async def nginx_integrator_app_fixture( model: Model, - synapse_app, # pylint: disable=unused-argument + synapse_app, nginx_integrator_app_name: str, ): """Deploy nginx-ingress-integrator.""" 
@@ -172,3 +181,21 @@ async def another_synapse_app_fixture( await model.wait_for_idle() yield synapse_app + + +@pytest.fixture(scope="module", name="postgresql_app_name") +def postgresql_app_name_app_name_fixture() -> str: + """Return the name of the postgresql application deployed for tests.""" + return "postgresql-k8s" + + +@pytest_asyncio.fixture(scope="module", name="postgresql_app") +async def postgresql_app_fixture( + ops_test: OpsTest, + model: Model, + postgresql_app_name: str, +): + """Deploy postgresql.""" + async with ops_test.fast_forward(): + await model.deploy(postgresql_app_name, channel="14/stable", trust=True) + await model.wait_for_idle() diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index 5681a111..8173d479 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -6,6 +6,7 @@ import logging import typing +import pytest import requests from juju.action import Action from juju.application import Application @@ -39,11 +40,11 @@ async def test_synapse_is_up( assert "Welcome to the Matrix" in response.text +@pytest.mark.usefixtures("traefik_app") async def test_with_ingress( ops_test: OpsTest, model: Model, synapse_app: Application, - traefik_app, # pylint: disable=unused-argument traefik_app_name: str, external_hostname: str, get_unit_ips: typing.Callable[[str], typing.Awaitable[tuple[str, ...]]], @@ -66,11 +67,10 @@ async def test_with_ingress( assert "Welcome to the Matrix" in response.text +@pytest.mark.usefixtures("synapse_app", "nginx_integrator_app") async def test_with_nginx_route( model: Model, synapse_app_name: str, - synapse_app: Application, # pylint: disable=unused-argument - nginx_integrator_app, # pylint: disable=unused-argument nginx_integrator_app_name: str, ): """ diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index a7b4724d..4a65dd7f 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -7,6 +7,7 @@ import typing import unittest.mock +from secrets import token_hex import ops import pytest @@ -14,7 +15,6 @@ from ops.testing import Harness from charm import SynapseCharm -from charm_types import ExecResult from constants import ( COMMAND_MIGRATE_CONFIG, SYNAPSE_COMMAND_PATH, @@ -22,6 +22,7 @@ SYNAPSE_CONTAINER_NAME, TEST_SERVER_NAME, ) +from synapse import ExecResult def inject_register_command_handler(monkeypatch: pytest.MonkeyPatch, harness: Harness): @@ -185,6 +186,38 @@ def harness_server_name_changed_fixture(harness_server_name_configured: Harness) return harness +@pytest.fixture(name="harness_with_postgresql") +def harness_with_postgresql_fixture( + harness_server_name_configured: Harness, datasource_postgresql_password: str +) -> Harness: + """Ops testing framework harness fixture with postgresql relation. + + This is a workaround for the fact that Harness doesn't reinitialize the charm as expected. 
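+    The relation data is injected with hooks disabled, then the framework and charm
+    are rebuilt so the observer sees the database relation when begin() runs.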
+ Reference: https://github.com/canonical/operator/issues/736 + """ + harness = harness_server_name_configured + harness.disable_hooks() + relation_id = harness.add_relation("database", "postgresql") + harness.add_relation_unit(relation_id, "postgresql/0") + harness.update_relation_data( + relation_id, + "postgresql", + { + "endpoints": "myhost:5432", + "username": "user", + "password": datasource_postgresql_password, + }, + ) + harness._framework = ops.framework.Framework( + harness._storage, harness._charm_dir, harness._meta, harness._model + ) + harness._charm = None + harness.enable_hooks() + harness.begin() + harness.set_leader(True) + return harness + + @pytest.fixture(name="container_mocked") def container_mocked_fixture(monkeypatch: pytest.MonkeyPatch) -> unittest.mock.MagicMock: """Mock container base to others fixtures.""" @@ -219,3 +252,20 @@ def container_with_path_error_pass_fixture( remove_path_mock = unittest.mock.MagicMock(side_effect=path_error) monkeypatch.setattr(container_mocked, "remove_path", remove_path_mock) return container_mocked + + +@pytest.fixture(name="erase_database_mocked") +def erase_database_mocked_fixture(monkeypatch: pytest.MonkeyPatch) -> unittest.mock.MagicMock: + """Mock erase_database.""" + database_mocked = unittest.mock.MagicMock() + erase_database_mock = unittest.mock.MagicMock(side_effect=None) + monkeypatch.setattr(database_mocked, "erase_database", erase_database_mock) + monkeypatch.setattr(database_mocked, "get_conn", unittest.mock.MagicMock()) + monkeypatch.setattr(database_mocked, "get_relation_data", unittest.mock.MagicMock()) + return database_mocked + + +@pytest.fixture(name="datasource_postgresql_password") +def datasource_postgresql_password_fixture() -> str: + """Generate random password""" + return token_hex(16) diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index a32c6a98..bfaf027d 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -5,9 +5,7 @@ # pylint: disable=protected-access -import io import json -import unittest.mock import ops import pytest @@ -69,7 +67,7 @@ def test_container_down(harness_server_name_configured: Harness) -> None: harness = harness_server_name_configured harness.set_can_connect(harness.model.unit.containers[SYNAPSE_CONTAINER_NAME], False) harness.update_config({"report_stats": True}) - assert isinstance(harness.model.unit.status, ops.WaitingStatus) + assert isinstance(harness.model.unit.status, ops.MaintenanceStatus) assert "Waiting for" in str(harness.model.unit.status) @@ -123,104 +121,3 @@ def test_server_name_change(harness_server_name_changed: Harness) -> None: harness = harness_server_name_changed assert isinstance(harness.model.unit.status, ops.BlockedStatus) assert "server_name modification is not allowed" in str(harness.model.unit.status) - - -@pytest.mark.parametrize("harness", [0], indirect=True) -def test_reset_instance_action(harness_server_name_changed: Harness) -> None: - """ - arrange: start the Synapse charm, set Synapse container to be ready and set server_name. - act: run reset-instance action. - assert: Synapse charm should reset the instance. 
- """ - harness = harness_server_name_changed - harness.set_leader(True) - event = unittest.mock.Mock() - # Calling to test the action since is not possible calling via harness - harness.charm._on_reset_instance_action(event) - assert event.set_results.call_count == 1 - event.set_results.assert_called_with({"reset-instance": True}) - assert isinstance(harness.model.unit.status, ops.ActiveStatus) - - -@pytest.mark.parametrize("harness", [1], indirect=True) -def test_reset_instance_action_failed(harness_server_name_changed: Harness) -> None: - """ - arrange: start the Synapse charm, set Synapse container to be ready and set server_name. - act: change server_name and run reset-instance action. - assert: Synapse charm should be blocked by error on migrate_config command. - """ - harness = harness_server_name_changed - harness.set_leader(True) - event = unittest.mock.Mock() - # Calling to test the action since is not possible calling via harness - harness.charm._on_reset_instance_action(event) - assert event.set_results.call_count == 0 - assert isinstance(harness.model.unit.status, ops.BlockedStatus) - assert "Migrate config failed" in str(harness.model.unit.status) - - -@pytest.mark.parametrize("harness", [0], indirect=True) -def test_reset_instance_action_path_error_blocked( - container_with_path_error_blocked: unittest.mock.MagicMock, - harness_server_name_changed: Harness, -) -> None: - """ - arrange: start the Synapse charm, set Synapse container to be ready and set server_name. - act: change server_name and run reset-instance action. - assert: Synapse charm should be blocked by error on remove_path. - """ - harness = harness_server_name_changed - harness.set_leader(True) - harness.charm.unit.get_container = unittest.mock.MagicMock( - return_value=container_with_path_error_blocked - ) - event = unittest.mock.MagicMock() - # Calling to test the action since is not possible calling via harness - harness.charm._on_reset_instance_action(event) - assert container_with_path_error_blocked.remove_path.call_count == 1 - assert isinstance(harness.model.unit.status, ops.BlockedStatus) - assert "Error erasing" in str(harness.model.unit.status) - - -@pytest.mark.parametrize("harness", [0], indirect=True) -def test_reset_instance_action_path_error_pass( - container_with_path_error_pass: unittest.mock.MagicMock, - harness_server_name_changed: Harness, - monkeypatch: pytest.MonkeyPatch, -) -> None: - """ - arrange: start the Synapse charm, set Synapse container to be ready and set server_name. - act: change server_name and run reset-instance action. - assert: Synapse charm should reset the instance. 
- """ - harness = harness_server_name_changed - harness.set_leader(True) - server_name_changed = "pebble-layer-1.synapse.com" - content = io.StringIO(f'server_name: "{server_name_changed}"') - pull_mock = unittest.mock.MagicMock(return_value=content) - monkeypatch.setattr(container_with_path_error_pass, "pull", pull_mock) - harness.charm.unit.get_container = unittest.mock.MagicMock( - return_value=container_with_path_error_pass - ) - event = unittest.mock.MagicMock() - # Calling to test the action since is not possible calling via harness - harness.charm._on_reset_instance_action(event) - assert container_with_path_error_pass.remove_path.call_count == 1 - assert isinstance(harness.model.unit.status, ops.ActiveStatus) - - -@pytest.mark.parametrize("harness", [0], indirect=True) -def test_reset_instance_action_no_leader( - harness_server_name_changed: Harness, -) -> None: - """ - arrange: start the Synapse charm, set Synapse container to be ready and set server_name. - act: change server_name and run reset-instance action. - assert: Synapse charm should take no action if no leader. - """ - harness = harness_server_name_changed - harness.set_leader(False) - event = unittest.mock.MagicMock() - # Calling to test the action since is not possible calling via harness - harness.charm._on_reset_instance_action(event) - assert isinstance(harness.model.unit.status, ops.BlockedStatus) diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py new file mode 100644 index 00000000..c3cbf124 --- /dev/null +++ b/tests/unit/test_database.py @@ -0,0 +1,267 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Database unit tests.""" + +# pylint: disable=protected-access + +import unittest.mock + +import ops +import psycopg2 +import pytest +from ops.testing import Harness +from psycopg2 import sql + +import database_observer +from charm_types import DatasourcePostgreSQL +from constants import SYNAPSE_CONTAINER_NAME +from database_client import DatabaseClient +from exceptions import CharmDatabaseRelationNotFoundError + + +@pytest.mark.parametrize("harness", [0], indirect=True) +def test_erase_database(harness_with_postgresql: Harness, monkeypatch: pytest.MonkeyPatch) -> None: + """ + arrange: start the Synapse charm, set Synapse container to be ready and set server_name. + act: add database relation and erase database. + assert: erase query is executed. + """ + harness = harness_with_postgresql + datasource = harness.charm.database.get_relation_as_datasource() + db_client = DatabaseClient(datasource=datasource) + conn_mock = unittest.mock.MagicMock() + cursor_mock = conn_mock.cursor.return_value.__enter__.return_value + cursor_mock.execute.side_effect = None + monkeypatch.setattr(db_client, "_connect", unittest.mock.MagicMock()) + db_client._conn = conn_mock + db_client.erase() + conn_mock.cursor.assert_called() + calls = [ + unittest.mock.call(sql.Composed([sql.SQL("DROP DATABASE "), sql.Identifier("synapse")])), + unittest.mock.call( + sql.Composed( + [ + sql.SQL("CREATE DATABASE "), + sql.Identifier("synapse"), + sql.SQL(" WITH LC_CTYPE = 'C' LC_COLLATE='C' TEMPLATE='template0';"), + ] + ) + ), + ] + cursor_mock.execute.assert_has_calls(calls) + + +@pytest.mark.parametrize("harness", [0], indirect=True) +def test_erase_database_error( + harness_with_postgresql: Harness, monkeypatch: pytest.MonkeyPatch +) -> None: + """ + arrange: start the Synapse charm, set Synapse container to be ready and set server_name. + act: add database relation and erase database. 
+ assert: exception is raised. + """ + harness = harness_with_postgresql + datasource = harness.charm.database.get_relation_as_datasource() + db_client = DatabaseClient(datasource=datasource) + conn_mock = unittest.mock.MagicMock() + cursor_mock = conn_mock.cursor.return_value.__enter__.return_value + error_msg = "Invalid query" + cursor_mock.execute.side_effect = psycopg2.Error(error_msg) + monkeypatch.setattr(db_client, "_connect", unittest.mock.MagicMock()) + db_client._conn = conn_mock + with pytest.raises(psycopg2.Error): + db_client.erase() + + +@pytest.mark.parametrize("harness", [0], indirect=True) +def test_connect( + harness_with_postgresql: Harness, + monkeypatch: pytest.MonkeyPatch, + datasource_postgresql_password: str, +): + """ + arrange: start the Synapse charm, set Synapse container to be ready and set server_name. + act: add relation and get connection. + assert: connection is called with correct parameters. + """ + harness = harness_with_postgresql + datasource = harness.charm.database.get_relation_as_datasource() + db_client = DatabaseClient(datasource=datasource) + mock_connection = unittest.mock.MagicMock() + mock_connection.autocommit = True + connect_mock = unittest.mock.MagicMock(return_value=mock_connection) + monkeypatch.setattr("psycopg2.connect", connect_mock) + db_client._connect() + query = ( + "dbname='synapse' user='user' host='myhost' " + f"password='{datasource_postgresql_password}' connect_timeout=5" + ) + connect_mock.assert_called_once_with(query) + + +@pytest.mark.parametrize("harness", [0], indirect=True) +def test_connect_error( + harness_with_postgresql: Harness, + monkeypatch: pytest.MonkeyPatch, +): + """ + arrange: start the Synapse charm, set Synapse container to be ready and set server_name. + act: add relation and get connection. + assert: exception is raised. + """ + harness = harness_with_postgresql + datasource = harness.charm.database.get_relation_as_datasource() + db_client = DatabaseClient(datasource=datasource) + error_msg = "Invalid query" + connect_mock = unittest.mock.MagicMock(side_effect=psycopg2.Error(error_msg)) + monkeypatch.setattr("psycopg2.connect", connect_mock) + with pytest.raises(psycopg2.Error): + db_client._connect() + + +@pytest.mark.parametrize("harness", [0], indirect=True) +def test_prepare_database( + harness_with_postgresql: Harness, monkeypatch: pytest.MonkeyPatch +) -> None: + """ + arrange: start the Synapse charm, set Synapse container to be ready and set server_name. + act: add database relation and prepare database. + assert: update query is executed. + """ + harness = harness_with_postgresql + datasource = harness.charm.database.get_relation_as_datasource() + db_client = DatabaseClient(datasource=datasource) + conn_mock = unittest.mock.MagicMock() + cursor_mock = conn_mock.cursor.return_value.__enter__.return_value + cursor_mock.execute.side_effect = None + monkeypatch.setattr(db_client, "_connect", unittest.mock.MagicMock()) + db_client._conn = conn_mock + db_client.prepare() + conn_mock.cursor.assert_called() + cursor_mock.execute.assert_called_with( + sql.Composed( + [ + sql.SQL("UPDATE pg_database SET datcollate='C', datctype='C' WHERE datname = "), + sql.Literal("synapse"), + ] + ) + ) + + +@pytest.mark.parametrize("harness", [0], indirect=True) +def test_prepare_database_error( + harness_with_postgresql: Harness, monkeypatch: pytest.MonkeyPatch +) -> None: + """ + arrange: start the Synapse charm, set Synapse container to be ready and set server_name. 
+    act: add database relation and prepare database.
+    assert: exception is raised.
+    """
+    harness = harness_with_postgresql
+    datasource = harness.charm.database.get_relation_as_datasource()
+    db_client = DatabaseClient(datasource=datasource)
+    conn_mock = unittest.mock.MagicMock()
+    cursor_mock = conn_mock.cursor.return_value.__enter__.return_value
+    error_msg = "Invalid query"
+    cursor_mock.execute.side_effect = psycopg2.Error(error_msg)
+    monkeypatch.setattr(db_client, "_connect", unittest.mock.MagicMock())
+    db_client._conn = conn_mock
+    with pytest.raises(psycopg2.Error):
+        db_client.prepare()
+
+
+@pytest.mark.parametrize("harness", [0], indirect=True)
+def test_relation_as_datasource(
+    harness_with_postgresql: Harness, datasource_postgresql_password: str
+) -> None:
+    """
+    arrange: start the Synapse charm, set Synapse container to be ready and set server_name.
+    act: add database relation.
+    assert: database data and synapse environment should be the same as relation data.
+    """
+    harness = harness_with_postgresql
+    expected = DatasourcePostgreSQL(
+        host="myhost",
+        db=harness.charm.app.name,
+        password=datasource_postgresql_password,
+        port="5432",
+        user="user",
+    )
+    assert expected == harness.charm.database.get_relation_as_datasource()
+    assert harness.charm.app.name == harness.charm.database.get_database_name()
+    synapse_env = harness.charm._synapse.synapse_environment()
+    assert synapse_env["POSTGRES_DB"] == expected["db"]
+    assert synapse_env["POSTGRES_HOST"] == expected["host"]
+    assert synapse_env["POSTGRES_PORT"] == expected["port"]
+    assert synapse_env["POSTGRES_USER"] == expected["user"]
+    assert synapse_env["POSTGRES_PASSWORD"] == expected["password"]
+
+
+@pytest.mark.parametrize("harness", [0], indirect=True)
+def test_relation_as_datasource_error(
+    harness_with_postgresql: Harness, monkeypatch: pytest.MonkeyPatch
+):
+    """
+    arrange: start the Synapse charm, set Synapse container to be ready and set server_name.
+    act: mock get_relation_as_datasource to return None and request the database name.
+    assert: CharmDatabaseRelationNotFoundError is raised.
+    """
+    harness = harness_with_postgresql
+    get_relation_as_datasource_mock = unittest.mock.MagicMock(return_value=None)
+    monkeypatch.setattr(
+        harness.charm.database, "get_relation_as_datasource", get_relation_as_datasource_mock
+    )
+    with pytest.raises(CharmDatabaseRelationNotFoundError):
+        harness.charm.database.get_database_name()
+
+
+@pytest.mark.parametrize("harness", [0], indirect=True)
+def test_change_config(
+    harness_with_postgresql: Harness,
+):
+    """
+    arrange: start the Synapse charm, set Synapse container to be ready and set server_name.
+    act: add relation and trigger change config.
+    assert: charm status is active.
+    """
+    harness = harness_with_postgresql
+    harness.charm.database._change_config(unittest.mock.MagicMock())
+    assert isinstance(harness.model.unit.status, ops.ActiveStatus)
+
+
+@pytest.mark.parametrize("harness", [0], indirect=True)
+def test_change_config_error(
+    harness_with_postgresql: Harness,
+):
+    """
+    arrange: start the Synapse charm, set Synapse container to be ready and set server_name.
+    act: set the container as down and trigger change config.
+    assert: charm status is maintenance while waiting for pebble.
+    """
+    harness = harness_with_postgresql
+    harness.set_can_connect(harness.model.unit.containers[SYNAPSE_CONTAINER_NAME], False)
+    harness.charm.database._change_config(unittest.mock.MagicMock())
+    assert isinstance(harness.model.unit.status, ops.MaintenanceStatus)
+
+
+@pytest.mark.parametrize("harness", [0], indirect=True)
+def test_on_database_created(harness_with_postgresql: Harness, monkeypatch: pytest.MonkeyPatch):
+    """
+    arrange: start the Synapse charm, set Synapse container to be ready and set server_name.
+    act: add relation and trigger _on_database_created.
+    assert: the database is prepared via the DatabaseClient.
+    """
+    harness = harness_with_postgresql
+    db_client_mock = unittest.mock.MagicMock()
+    conn_mock = unittest.mock.MagicMock()
+    cursor_mock = conn_mock.cursor.return_value.__enter__.return_value
+    cursor_mock.execute.side_effect = None
+    monkeypatch.setattr(db_client_mock, "_connect", unittest.mock.MagicMock())
+    db_client_mock._conn = conn_mock
+    monkeypatch.setattr(
+        database_observer, "DatabaseClient", unittest.mock.MagicMock(return_value=db_client_mock)
+    )
+    harness.charm.database._on_database_created(unittest.mock.MagicMock())
+    db_client_mock.prepare.assert_called_once()
diff --git a/tests/unit/test_reset_instance_action.py b/tests/unit/test_reset_instance_action.py
new file mode 100644
index 00000000..cf127818
--- /dev/null
+++ b/tests/unit/test_reset_instance_action.py
@@ -0,0 +1,135 @@
+# Copyright 2023 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""Reset instance action unit tests."""
+
+# pylint: disable=protected-access
+
+import io
+import unittest.mock
+
+import ops
+import pytest
+from ops.testing import Harness
+
+from constants import SYNAPSE_CONTAINER_NAME
+
+
+@pytest.mark.parametrize("harness", [0], indirect=True)
+def test_reset_instance_action(harness_server_name_changed: Harness) -> None:
+    """
+    arrange: start the Synapse charm, set Synapse container to be ready and set server_name.
+    act: run reset-instance action.
+    assert: Synapse charm should reset the instance.
+    """
+    harness = harness_server_name_changed
+    harness.set_leader(True)
+    event = unittest.mock.Mock()
+    # Calling the handler directly since it is not possible to run actions via harness
+    harness.charm._on_reset_instance_action(event)
+    assert event.set_results.call_count == 1
+    event.set_results.assert_called_with({"reset-instance": True})
+    assert isinstance(harness.model.unit.status, ops.ActiveStatus)
+
+
+@pytest.mark.parametrize("harness", [0], indirect=True)
+def test_reset_instance_action_container_down(harness_server_name_changed: Harness) -> None:
+    """
+    arrange: start the Synapse charm, set Synapse container to be ready and set server_name.
+    act: set the container as down and run the reset-instance action.
+    assert: the action fails because the container is unreachable.
+    """
+    harness = harness_server_name_changed
+    harness.set_leader(True)
+    harness.set_can_connect(harness.model.unit.containers[SYNAPSE_CONTAINER_NAME], False)
+    event = unittest.mock.Mock()
+    # Calling the handler directly since it is not possible to run actions via harness
+    harness.charm._on_reset_instance_action(event)
+    assert event.set_results.call_count == 0
+    assert event.fail.call_count == 1
+
+
+@pytest.mark.parametrize("harness", [1], indirect=True)
+def test_reset_instance_action_failed(harness_server_name_changed: Harness) -> None:
+    """
+    arrange: start the Synapse charm, set Synapse container to be ready and set server_name.
+    act: change server_name and run reset-instance action.
+    assert: Synapse charm should be blocked by error on migrate_config command.
+    """
+    harness = harness_server_name_changed
+    harness.set_leader(True)
+    event = unittest.mock.Mock()
+    # Calling the handler directly since it is not possible to run actions via harness
+    harness.charm._on_reset_instance_action(event)
+    assert event.set_results.call_count == 0
+    assert isinstance(harness.model.unit.status, ops.BlockedStatus)
+    assert "Migrate config failed" in str(harness.model.unit.status)
+
+
+@pytest.mark.parametrize("harness", [0], indirect=True)
+def test_reset_instance_action_path_error_blocked(
+    container_with_path_error_blocked: unittest.mock.MagicMock,
+    harness_server_name_changed: Harness,
+) -> None:
+    """
+    arrange: start the Synapse charm, set Synapse container to be ready and set server_name.
+    act: change server_name and run reset-instance action.
+    assert: Synapse charm should be blocked by error on remove_path.
+    """
+    harness = harness_server_name_changed
+    harness.set_leader(True)
+    harness.charm.unit.get_container = unittest.mock.MagicMock(
+        return_value=container_with_path_error_blocked
+    )
+    event = unittest.mock.MagicMock()
+    # Calling the handler directly since it is not possible to run actions via harness
+    harness.charm._on_reset_instance_action(event)
+    assert container_with_path_error_blocked.remove_path.call_count == 1
+    assert isinstance(harness.model.unit.status, ops.BlockedStatus)
+    assert "Error erasing" in str(harness.model.unit.status)
+
+
+@pytest.mark.parametrize("harness", [0], indirect=True)
+def test_reset_instance_action_path_error_pass(
+    container_with_path_error_pass: unittest.mock.MagicMock,
+    harness_server_name_changed: Harness,
+    monkeypatch: pytest.MonkeyPatch,
+    erase_database_mocked: unittest.mock.MagicMock,
+) -> None:
+    """
+    arrange: start the Synapse charm, set Synapse container to be ready and set server_name.
+    act: change server_name and run reset-instance action.
+    assert: Synapse charm should reset the instance.
+    """
+    harness = harness_server_name_changed
+    harness.set_leader(True)
+    harness.charm._database = erase_database_mocked
+    server_name_changed = "pebble-layer-1.synapse.com"
+    content = io.StringIO(f'server_name: "{server_name_changed}"')
+    pull_mock = unittest.mock.MagicMock(return_value=content)
+    monkeypatch.setattr(container_with_path_error_pass, "pull", pull_mock)
+    harness.charm.unit.get_container = unittest.mock.MagicMock(
+        return_value=container_with_path_error_pass
+    )
+    event = unittest.mock.MagicMock()
+    # Calling the handler directly since it is not possible to run actions via harness
+    harness.charm._on_reset_instance_action(event)
+    assert container_with_path_error_pass.remove_path.call_count == 1
+    assert isinstance(harness.model.unit.status, ops.ActiveStatus)
+
+
+@pytest.mark.parametrize("harness", [0], indirect=True)
+def test_reset_instance_action_no_leader(
+    harness_server_name_changed: Harness,
+) -> None:
+    """
+    arrange: start the Synapse charm, set Synapse container to be ready and set server_name.
+    act: change server_name and run reset-instance action.
+    assert: Synapse charm should take no action if no leader.
+    """
+    harness = harness_server_name_changed
+    harness.set_leader(False)
+    event = unittest.mock.MagicMock()
+    # Calling the handler directly since it is not possible to run actions via harness
+    harness.charm._on_reset_instance_action(event)
+    assert isinstance(harness.model.unit.status, ops.BlockedStatus)
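A minimal sketch of how the pieces introduced above fit together; the connection values below are illustrative placeholders rather than anything shipped in this change:

```python
# Sketch only: the relation normally supplies these values via
# DatabaseObserver.get_relation_as_datasource(), and Synapse.synapse_environment()
# maps them to the POSTGRES_* environment variables consumed by the workload.
from charm_types import DatasourcePostgreSQL
from database_client import DatabaseClient

datasource = DatasourcePostgreSQL(
    user="synapse", password="changeme", host="10.1.2.3", port="5432", db="synapse"
)

db_client = DatabaseClient(datasource=datasource)
db_client.prepare()  # forces LC_COLLATE/LC_CTYPE to 'C', as Synapse requires
db_client.erase()    # drops and recreates the database (used by reset-instance)
```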