diff --git a/.github/workflows/integration_test.yaml b/.github/workflows/integration_test.yaml index cc39067..a981372 100644 --- a/.github/workflows/integration_test.yaml +++ b/.github/workflows/integration_test.yaml @@ -1,11 +1,16 @@ name: Integration tests - on: pull_request: - + workflow_call: jobs: integration-test-microk8s: name: Integration tests (microk8s) + strategy: + fail-fast: false + matrix: + tox-environments: + - integration-charm + - integration-policy runs-on: ubuntu-latest steps: - name: Checkout @@ -15,13 +20,7 @@ jobs: with: juju-channel: 3.1/stable provider: microk8s + microk8s-addons: "ingress storage dns rbac registry" channel: 1.25-strict/stable - name: Run integration tests - # set a predictable model name so it can be consumed by charm-logdump-action - run: tox -e integration -- --model testing - - name: Dump logs - uses: canonical/charm-logdump-action@main - if: failure() - with: - app: trino-k8s - model: testing + run: tox -e ${{ matrix.tox-environments }} diff --git a/.github/workflows/test_and_publish_charm.yaml b/.github/workflows/test_and_publish_charm.yaml index 54a160a..f7bbe0b 100644 --- a/.github/workflows/test_and_publish_charm.yaml +++ b/.github/workflows/test_and_publish_charm.yaml @@ -13,4 +13,4 @@ jobs: integration-test-provider: microk8s integration-test-provider-channel: 1.25-strict/stable integration-test-juju-channel: 3.1/stable - integration-test-modules: '["test_charm"]' + integration-test-modules: '["test_charm", "test_policy"]' diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 264fa47..ccc08dc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -19,19 +19,22 @@ This charm is used to deploy Trino Server in a k8s cluster. 
For local deployment ### Install Microk8s ``` # Install Microk8s from snap: -sudo snap install microk8s --classic --channel=1.25 +sudo snap install microk8s --channel 1.25-strict/stable -# Add the 'ubuntu' user to the Microk8s group: -sudo usermod -a -G microk8s ubuntu +# Add your user to the Microk8s group: +sudo usermod -a -G snap_microk8s $USER -# Give the 'ubuntu' user permissions to read the ~/.kube directory: -sudo chown -f -R ubuntu ~/.kube +# Switch to microk8s group +newgrp snap_microk8s -# Create the 'microk8s' group: -newgrp microk8s +# Create the ~/.kube/ directory and load microk8s configuration +mkdir -p ~/.kube/ && microk8s config > ~/.kube/config # Enable the necessary Microk8s addons: -microk8s enable hostpath-storage dns +sudo microk8s enable hostpath-storage dns + +# Set up a short alias for Kubernetes CLI: +sudo snap alias microk8s.kubectl kubectl ``` ### Install Charmcraft ``` diff --git a/config.yaml b/config.yaml index 5e53387..a70cb45 100644 --- a/config.yaml +++ b/config.yaml @@ -12,12 +12,6 @@ options: Valid values: info, debug, warn, error. default: info type: string - application-name: - description: | - The name of the application service to be used as internal host - when providing a jdbc string to Ranger for the policy relation. - default: trino-k8s - type: string int-comms-secret: description: | The secret Trino uses to communicate between nodes. 
diff --git a/lib/charms/nginx_ingress_integrator/v0/nginx_route.py b/lib/charms/nginx_ingress_integrator/v0/nginx_route.py index 57833bd..9a7baa8 100644 --- a/lib/charms/nginx_ingress_integrator/v0/nginx_route.py +++ b/lib/charms/nginx_ingress_integrator/v0/nginx_route.py @@ -86,7 +86,7 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 3 +LIBPATCH = 4 __all__ = ["require_nginx_route", "provide_nginx_route"] @@ -172,7 +172,8 @@ def _config_reconciliation(self, _event: typing.Any = None) -> None: # C901 is ignored since the method has too many ifs but wouldn't be # necessarily good to reduce to smaller methods. -def require_nginx_route( # pylint: disable=too-many-locals,too-many-branches # noqa: C901 +# E501: line too long +def require_nginx_route( # pylint: disable=too-many-locals,too-many-branches,too-many-arguments # noqa: C901,E501 *, charm: ops.charm.CharmBase, service_hostname: str, diff --git a/src/charm.py b/src/charm.py index 97874ed..849d9b4 100755 --- a/src/charm.py +++ b/src/charm.py @@ -50,7 +50,7 @@ class TrinoK8SCharm(CharmBase): """Charm the service. Attrs: - _state: used to store data that is persisted across invocations. + state: used to store data that is persisted across invocations. external_hostname: DNS listing used for external connections. """ @@ -67,7 +67,7 @@ def __init__(self, *args): """ super().__init__(*args) self.name = "trino" - self._state = State(self.app, lambda: self.model.get_relation("peer")) + self.state = State(self.app, lambda: self.model.get_relation("peer")) self.connector = TrinoConnector(self) self.policy = PolicyRelationHandler(self) @@ -103,7 +103,7 @@ def _on_install(self, event): Args: event: The event triggered when the relation changed. 
""" - self.unit.status = MaintenanceStatus("installing trino") + self.unit.status = MaintenanceStatus(f"{self.name} unit provisioned.") @log_event_handler(logger) def _on_pebble_ready(self, event: PebbleReadyEvent): @@ -131,7 +131,8 @@ def _on_peer_relation_changed(self, event): Args: event: The event triggered when the peer relation changed """ - if not self.ready_to_start(): + if not self.state.is_ready(): + self.unit.status = WaitingStatus("Waiting for peer relation.") event.defer() return @@ -148,7 +149,7 @@ def _on_peer_relation_changed(self, event): self.unit.status = BlockedStatus(str(err)) return - target_connectors = self._state.connectors or {} + target_connectors = self.state.connectors or {} if target_connectors == current_connectors: return @@ -162,17 +163,11 @@ def _get_current_connectors(self, container): Returns: properties: A dictionary of existing connector configurations - - Raises: - RuntimeError: Failed to return property files """ properties = {} - out, err = container.exec(["ls", CATALOG_PATH]).wait_output() - if err: - raise RuntimeError(f"Could not return files: {err}") - - files = out.strip().split("\n") - property_names = [file_name.split(".")[0] for file_name in files] + files = container.list_files(CATALOG_PATH, pattern="*.properties") + file_names = [f.name for f in files] + property_names = [file_name.split(".")[0] for file_name in file_names] for item in property_names: path = f"{CATALOG_PATH}/{item}.properties" @@ -211,18 +206,6 @@ def _restart_trino(self, container): container.restart(self.name) self.unit.status = ActiveStatus() - def ready_to_start(self): - """Check if peer relation established. - - Returns: - True if peer relation established, else False. - """ - if not self._state.is_ready(): - self.unit.status = WaitingStatus("Waiting for peer relation.") - return False - - return True - @log_event_handler(logger) def _on_restart(self, event): """Restart Trino, action handler. 
@@ -270,9 +253,9 @@ def _enable_password_auth(self, container): def _create_truststore_password(self): """Create truststore password if it does not exist.""" - if not self._state.truststore_password: + if not self.state.truststore_password: truststore_password = generate_password() - self._state.truststore_password = truststore_password + self.state.truststore_password = truststore_password def _open_service_port(self): """Open port 8080 on Trino coordinator.""" @@ -315,11 +298,11 @@ def _create_environment(self): "OAUTH_CLIENT_ID": self.config.get("google-client-id"), "OAUTH_CLIENT_SECRET": self.config.get("google-client-secret"), "WEB_PROXY": self.config.get("web-proxy"), - "SSL_PWD": self._state.truststore_password, + "SSL_PWD": self.state.truststore_password, "SSL_PATH": f"{CONF_PATH}/truststore.jks", "CHARM_FUNCTION": self.config["charm-function"], "DISCOVERY_URI": self.config["discovery-uri"], - "APPLICATION_NAME": self.config["application-name"], + "APPLICATION_NAME": self.app.name, } return env @@ -334,7 +317,8 @@ def _update(self, event): event.defer() return - if not self.ready_to_start(): + if not self.state.is_ready(): + self.unit.status = WaitingStatus("Waiting for peer relation.") event.defer() return diff --git a/src/connector.py b/src/connector.py index 0741131..cd5277e 100644 --- a/src/connector.py +++ b/src/connector.py @@ -106,11 +106,11 @@ def _add_cert_to_truststore(self, container, conn_name): "-keystore", "truststore.jks", "-storepass", - self.charm._state.truststore_password, + self.charm.state.truststore_password, "-noprompt", ] try: - container.exec(command, working_dir=CONF_PATH).wait_output() + container.exec(command, working_dir=CONF_PATH).wait() except ExecError as e: expected_error_string = f"alias <{conn_name}> already exists" if expected_error_string in str(e.stdout): @@ -153,12 +153,12 @@ def _add_connector_to_state(self, config, conn_name): config: The configuration of the connector conn_name: The name of the connector """ - if 
self.charm._state.connectors: - connectors = self.charm._state.connectors + if self.charm.state.connectors: + connectors = self.charm.state.connectors else: connectors = {} connectors[conn_name] = config - self.charm._state.connectors = connectors + self.charm.state.connectors = connectors def _delete_cert_from_truststore(self, container, conn_name): """Delete CA from JKS truststore. @@ -179,11 +179,11 @@ def _delete_cert_from_truststore(self, container, conn_name): "-keystore", "truststore.jks", "-storepass", - self.charm._state.truststore_password, + self.charm.state.truststore_password, "-noprompt", ] try: - container.exec(command, working_dir=CONF_PATH).wait_output() + container.exec(command, working_dir=CONF_PATH).wait() except ExecError as e: expected_error_string = f"Alias <{conn_name}> does not exist" if expected_error_string in str(e.stdout): @@ -213,7 +213,7 @@ def _on_remove_connector(self, event: ActionEvent): ) return - if not self.charm._state.connectors[conn_name]: + if not self.charm.state.connectors[conn_name]: event.fail( f"Failed to remove {conn_name}, connector does not exist" ) @@ -230,9 +230,9 @@ def _on_remove_connector(self, event: ActionEvent): container.remove_path(path=path) - existing_connectors = self.charm._state.connectors + existing_connectors = self.charm.state.connectors del existing_connectors[conn_name] - self.charm._state.connectors = existing_connectors + self.charm.state.connectors = existing_connectors self.charm._restart_trino(container) event.set_results({"result": "connector successfully removed"}) diff --git a/src/relations/policy.py b/src/relations/policy.py index a0d607d..7f45512 100644 --- a/src/relations/policy.py +++ b/src/relations/policy.py @@ -7,7 +7,6 @@ import yaml from ops import framework -from ops.model import BlockedStatus from ops.pebble import ExecError from literals import ( @@ -92,10 +91,11 @@ def _on_relation_changed(self, event): container = self.charm.model.unit.get_container(self.charm.name) if not 
container.can_connect(): + event.defer() return policy_manager_url = event.relation.data[event.app].get( - "policy_manager_url", None + "policy_manager_url" ) if not policy_manager_url: return @@ -110,13 +110,10 @@ def _on_relation_changed(self, event): self._enable_plugin(container) logger.info("Ranger plugin is enabled.") except ExecError as err: - self.charm.unit.status = BlockedStatus( - "Failed to enable Ranger plugin." - ) raise ExecError(f"Unable to enable Ranger plugin: {err}") from err users_and_groups = event.relation.data[event.app].get( - "user-group-configuration", None + "user-group-configuration" ) if users_and_groups: try: @@ -124,13 +121,6 @@ def _on_relation_changed(self, event): except ExecError: logger.exception("Failed to synchronize groups:") event.defer() - except Exception: - logger.exception( - "An exception occurred while sychronizing Ranger groups:" - ) - self.charm.unit.status = BlockedStatus( - "Failed to synchronize Ranger groups." - ) self.charm._restart_trino(container) def _prepare_service(self, event): @@ -142,7 +132,7 @@ def _prepare_service(self, event): Returns: service: Service values to be set in relation databag """ - host = self.charm.config["application-name"] + host = self.charm.app.name port = TRINO_PORTS["HTTP"] uri = f"{host}:{port}" @@ -173,6 +163,7 @@ def _on_relation_broken(self, event): container = self.charm.model.unit.get_container(self.charm.name) if not container.can_connect(): + event.defer() return if not container.exists(self.ranger_plugin_path): @@ -207,7 +198,7 @@ def _enable_plugin(self, container): command, working_dir=self.ranger_plugin_path, environment=JAVA_ENV, - ).wait_output() + ).wait() @handle_exec_error def _unpack_plugin(self, container): @@ -226,7 +217,7 @@ def _unpack_plugin(self, container): "xf", f"ranger-{tar_version}-trino-plugin.tar.gz", ] - container.exec(command, working_dir="/root").wait_output() + container.exec(command, working_dir="/root").wait() def _configure_plugin_properties( 
self, container, policy_manager_url, policy_relation @@ -367,7 +358,7 @@ def _create_members(self, container, member_type, to_create): elif member_type == "membership": command = ["usermod", "-aG", member[0], member[1]] - container.exec(command).wait_output() + container.exec(command).wait() @handle_exec_error def _delete_memberships(self, container, to_delete): @@ -383,7 +374,7 @@ def _delete_memberships(self, container, to_delete): logger.debug(f"Attempting to delete membership {membership}") container.exec( ["deluser", membership[1], membership[0]] - ).wait_output() + ).wait() @handle_exec_error def _get_ranger_users(self, container): @@ -421,7 +412,6 @@ def _disable_ranger_plugin(self, container): command, working_dir=self.ranger_plugin_path, environment=JAVA_ENV, - ).wait_output() + ).wait() - if container.exists(RANGER_ACCESS_CONTROL_PATH): - container.remove_path(RANGER_ACCESS_CONTROL_PATH) + container.remove_path(RANGER_ACCESS_CONTROL_PATH, recursive=True) diff --git a/src/utils.py b/src/utils.py index 14a6224..cb0deec 100644 --- a/src/utils.py +++ b/src/utils.py @@ -109,17 +109,11 @@ def validate_membership(connector_fields, conn_input): required = connector_fields["required"] optional = connector_fields["optional"] - missing_fields = [] - for field in required: - if field not in conn_input: - missing_fields.append(field) + missing_fields = set(required) - set(conn_input) if missing_fields: raise ValueError(f"field(s) {missing_fields!r} are required") - illegal_fields = [] - for field in conn_input: - if field not in required and field not in optional: - illegal_fields.append(field) + illegal_fields = set(conn_input) - (set(required) | set(optional)) if illegal_fields: raise ValueError(f"field(s) {illegal_fields!r} are not allowed") diff --git a/tox.ini b/tox.ini index 5003da9..2977198 100644 --- a/tox.ini +++ b/tox.ini @@ -36,6 +36,32 @@ deps = commands = pytest {[vars]tst_path}integration -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO 
-s {posargs} +[testenv:integration-policy] +description = Run policy integration tests (test_policy.py) +deps = + ipdb==0.13.9 + juju==3.1.0.1 + pytest==7.4.0 + pytest-operator==0.29.0 + trino==0.326.0 + apache-ranger==0.0.11 + -r{toxinidir}/requirements.txt +commands = + pytest {[vars]tst_path}integration/test_policy.py -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} + +[testenv:integration-charm] +description = Run charm integration tests (test_charm.py) +deps = + ipdb==0.13.9 + juju==3.1.0.1 + pytest==7.4.0 + pytest-operator==0.29.0 + trino==0.326.0 + apache-ranger==0.0.11 + -r{toxinidir}/requirements.txt +commands = + pytest {[vars]tst_path}integration/test_charm.py -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} + [testenv:unit] description = Run tests deps =