diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index d303d6b03..9471db973 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -12,7 +12,7 @@ A clear and concise description of what the bug is.
 
 **To Reproduce**
 Steps to reproduce the behavior, for e.g:
-1. Install funcx==0.2.2 and funcx-endpoint==0.2.2 with Python 3.7 on cluster
+1. Install globus-compute-sdk==2.0.0 and globus-compute-endpoint==2.0.0 with Python 3.7 on cluster
 2. Run a test script
 3. Wait 5 mins
 4. See error
@@ -25,8 +25,8 @@ A clear and concise description of what you expected to happen.
 - OS & Container technology: [e.g. ubuntu, centos & singularity, docker] @ endpoint
 - Python version @ client
 - Python version @ endpoint
- - funcx version @ client
- - funcx-endpoint version @ endpoint
+ - globus-compute-sdk version @ client
+ - globus-compute-endpoint version @ endpoint
 
 **Distributed Environment**
 - Where are you running the funcX script from? [e.g. Laptop/Workstation, Login node, Compute node]
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 9188d3533..916581d43 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -22,11 +22,11 @@ jobs:
         run: pre-commit run -a
       - name: mypy (sdk)
         run: |
-          cd funcx_sdk
+          cd compute_sdk
           tox -e mypy
       - name: mypy (endpoint)
         run: |
-          cd funcx_endpoint
+          cd compute_endpoint
           tox -e mypy
 
   # ensure docs can build, imitating the RTD build as best we can
@@ -50,8 +50,8 @@ jobs:
       - name: install requirements
        run: |
          python -m pip install --upgrade setuptools
-         python -m pip install './funcx_sdk'
-         python -m pip install './funcx_endpoint'
+         python -m pip install './compute_sdk'
+         python -m pip install './compute_endpoint'
          python -m pip install safety
      - name: run safety check
        run: safety check
@@ -72,7 +72,7 @@ jobs:
      - run: python -m pip install tox
      - name: run tests
        run: |
-         cd funcx_sdk
+         cd compute_sdk
          tox -e py
 
  test-endpoint:
@@ -97,7 +97,7 @@ jobs:
      - run: python -m pip install tox
      - name: run tests
        run: |
-         cd funcx_endpoint
+         cd compute_endpoint
          tox -e py -- --log-cli-level=ERROR
      - name: Collect Docker Logs
        if: failure()
@@ -122,7 +122,7 @@ jobs:
        run: echo "##[set-output name=imagetag;]$(echo ${GITHUB_REF##*/})"
        id: extract_tag_name
 
-     - name: Build funcX-endpoint Image for selected python version
+     - name: Build globus-compute-endpoint Image for selected python version
        uses: elgohr/Publish-Docker-Github-Action@master
        env:
          PYTHON_VERSION: ${{ matrix.python }}
diff --git a/.github/workflows/daily.yaml b/.github/workflows/daily.yaml
index e745793f4..1f6d91424 100644
--- a/.github/workflows/daily.yaml
+++ b/.github/workflows/daily.yaml
@@ -35,7 +35,7 @@ jobs:
      - name: install requirements
        run: |
          python -m pip install --upgrade pip setuptools wheel
-         python -m pip install './funcx_sdk'
+         python -m pip install './compute_sdk'
          python -m pip install safety
      - name: run safety check
        run: safety check
@@ -50,7 +50,7 @@ jobs:
      - name: install requirements
        run: |
          python -m pip install --upgrade pip setuptools wheel
-         python -m pip install './funcx_endpoint'
+         python -m pip install './compute_endpoint'
          python -m pip install safety
      - name: run safety check
        run: safety check
diff --git a/.isort.cfg b/.isort.cfg
index 693bac375..c8597e4a1 100644
--- a/.isort.cfg
+++ b/.isort.cfg
@@ -1,3 +1,3 @@
 [settings]
 profile = black
-known_first_party = funcx,funcx_endpoint
+known_first_party = globus_compute_sdk, globus_compute_endpoint
diff --git a/.readthedocs.yml b/.readthedocs.yml
index 31ebd3e43..50e376a4a 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -11,8 +11,8 @@ build:
 python:
   install:
     - method: pip
-      path: ./funcx_sdk
+      path: ./compute_sdk
       extra_requirements:
         - docs
     - method: pip
-      path: ./funcx_endpoint
+      path: ./compute_endpoint
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 33ed8d0fc..fc7a78b85 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -48,14 +48,14 @@ to fill out the fragment.
 ## Installing Testing Requirements
 
 Testing requirements for each of the two packages in this repository
-(funcx-sdk and funcx-endpoint) are specified as installable extras.
+(globus-compute-sdk and globus-compute-endpoint) are specified as installable extras.
 
-To install the funcx-sdk test requirements
+To install the globus-compute-sdk test requirements
 
-    cd funcx_sdk
+    cd compute_sdk
     pip install '.[test]'
 
-To install the funcx-endpoint test requirements
+To install the globus-compute-endpoint test requirements
 
-    cd funcx_endpoint
+    cd compute_endpoint
     pip install '.[test]'
diff --git a/Dockerfile-endpoint b/Dockerfile-endpoint
index e7abb6213..5bea6496c 100644
--- a/Dockerfile-endpoint
+++ b/Dockerfile-endpoint
@@ -14,26 +14,26 @@ RUN apt-get update && apt-get upgrade -y
 RUN if [ -n "$pip_conf" ]; then echo "$pip_conf" > "/etc/pip.conf"; fi
 
-RUN mkdir /opt/funcx
+RUN mkdir /opt/compute
 RUN python -m pip install -U pip
 RUN python -m pip install kubernetes
 RUN python -m pip install --no-binary :all: --force-reinstall pyzmq
 
-COPY funcx_sdk /opt/funcx/funcx_sdk/
-WORKDIR /opt/funcx/funcx_sdk
+COPY compute_sdk /opt/compute/compute_sdk/
+WORKDIR /opt/compute/compute_sdk
 RUN python -m pip install .
 
-COPY funcx_endpoint /opt/funcx/funcx_endpoint
-WORKDIR /opt/funcx/funcx_endpoint
+COPY compute_endpoint /opt/compute/compute_endpoint
+WORKDIR /opt/compute/compute_endpoint
 RUN python -m pip install .
 
 # Undo local-dev ministrations
 RUN if [ -n "$pip_conf" ]; then rm -f "/etc/pip.conf"; fi
 RUN if [ -n "$apt_proxy_url" ]; then rm -f "/etc/apt/apt.conf.d/01proxy"; fi
 
-RUN useradd -m funcx
-RUN mkdir -p /home/funcx/.kube
-USER funcx
-WORKDIR /home/funcx
+RUN useradd -m compute
+RUN mkdir -p /home/compute/.kube
+USER compute
+WORKDIR /home/compute
 COPY helm/boot.sh .
 
-ENV HOME /home/funcx
+ENV HOME /home/compute
diff --git a/Makefile b/Makefile
index f5a2f38f0..a1d167cce 100644
--- a/Makefile
+++ b/Makefile
@@ -1,13 +1,13 @@
 .venv-docs:
 	python -m venv .venv-docs
 	.venv-docs/bin/pip install -U pip setuptools
-	.venv-docs/bin/pip install './funcx_sdk[docs]' './funcx_endpoint'
+	.venv-docs/bin/pip install './compute_sdk[docs]' './compute_endpoint'
 
 .PHONY: lint docs
 lint:
 	pre-commit run -a
-	cd funcx_sdk; tox -e mypy; cd ..
-	cd funcx_endpoint; tox -e mypy; cd ..
+	cd compute_sdk; tox -e mypy; cd ..
+	cd compute_endpoint; tox -e mypy; cd ..
 
 docs: .venv-docs
 	# clean the build dir before rebuilding
diff --git a/README.rst b/README.rst
index fcbb654e5..7ef55dbbb 100644
--- a/README.rst
+++ b/README.rst
@@ -1,10 +1,10 @@
-funcX - Fast Function Serving
-=============================
+Globus Compute - Fast Function Serving
+=======================================
 |licence| |build-status| |docs| |launch| |NSF-2004894| |NSF-2004932|
 
-funcX is a high-performance function-as-a-service (FaaS) platform that enables
-intuitive, flexible, efficient, scalable, and performant remote function execution
-on existing infrastructure including clouds, clusters, and supercomputers.
+Globus Compute (formerly funcX) is a high-performance function-as-a-service (FaaS)
+platform that enables intuitive, flexible, efficient, scalable, and performant remote
+function execution on existing infrastructure including clouds, clusters, and supercomputers.
 
 .. |licence| image:: https://img.shields.io/badge/License-Apache%202.0-blue.svg
    :target: https://github.com/funcx-faas/funcX/blob/master/LICENSE
@@ -26,36 +26,36 @@ on existing infrastructure including clouds, clusters, and supercomputers.
    :alt: NSF award info
 
 
-.. image:: docs/_static/logo.png
+.. image:: docs/_static/images/globus-300x300-blue.png
   :target: https://www.funcx.org
   :width: 200
 
 Website: https://www.funcx.org
 
-Documentation: https://funcx.readthedocs.io/en/latest/
+Documentation: https://globus-compute.readthedocs.io/en/latest/
 
 Quickstart
 ==========
 
-funcX is in a beta state with version `0.3.7` currently available on PyPI.
+Globus Compute is currently available on PyPI.
 
-To install funcX, please ensure you have python3.6+.::
+To install Globus Compute, please ensure you have python3.7+.::
 
     $ python3 --version
 
 Install using Pip::
 
-    $ pip install funcx
+    $ pip install globus-compute-sdk
 
 To use our example notebooks you will need Jupyter.::
 
     $ pip install jupyter
 
-.. note:: The funcX client is supported on MacOS, Linux and Windows.
-          The funcx-endpoint is only supported on Linux.
+.. note:: The Globus Compute client is supported on MacOS, Linux and Windows.
+          The Globus Compute endpoint is only supported on Linux.
 
 Documentation
 =============
 
-Complete documentation for funcX is available `here <https://funcx.readthedocs.io/en/latest/>`_
+Complete documentation for Globus Compute is available `here <https://globus-compute.readthedocs.io/en/latest/>`_
 
diff --git a/RELEASING.md b/RELEASING.md
index 85a211aa1..ca559ea3a 100644
--- a/RELEASING.md
+++ b/RELEASING.md
@@ -1,6 +1,6 @@
 # Releasing
 
-Releases of `funcx` and `funcx-endpoint` are always done with a single version
+Releases of `globus-compute-sdk` and `globus-compute-endpoint` are always done with a single version
 number, even when only one package has changes.
 
 The process is partially automated with tools to help along the way.
@@ -17,7 +17,7 @@ You will also need the following credentials:
 
 - a configured GPG key in `git` in order to create signed tags
 - pypi credentials for use with `twine` (e.g. a token in `~/.pypirc`) valid for
-  publishing `funcx` and `funcx-endpoint`
+  publishing `globus-compute-sdk` and `globus-compute-endpoint`
 
 ## Procedure
 
@@ -25,7 +25,7 @@ You will also need the following credentials:
    the `-dev` suffix
 
 ```bash
-$EDITOR funcx_sdk/funcx/version.py funcx_endpoint/funcx_endpoint/version.py
+$EDITOR compute_sdk/globus_compute_sdk/version.py compute_endpoint/globus_compute_endpoint/version.py
 ```
 
 2. Update the changelog by running `scriv collect --edit`
@@ -35,7 +35,7 @@ $EDITOR funcx_sdk/funcx/version.py funcx_endpoint/funcx_endpoint/version.py
 ```bash
 git add changelog.d/ docs/changelog.rst
-git add funcx_sdk/funcx/version.py funcx_endpoint/funcx_endpoint/version.py
+git add compute_sdk/globus_compute_sdk/version.py compute_endpoint/globus_compute_endpoint/version.py
 git commit -m 'Bump versions and changelog for release'
 git push
 ```
@@ -50,8 +50,8 @@ git push
 then commit and push, e.g.
```bash -$EDITOR funcx_sdk/funcx/version.py funcx_endpoint/funcx_endpoint/version.py -git add funcx_sdk/funcx/version.py funcx_endpoint/funcx_endpoint/version.py +$EDITOR compute_sdk/globus_compute_sdk/version.py compute_endpoint/globus_compute_endpoint/version.py +git add compute_sdk/globus_compute_sdk/version.py compute_endpoint/globus_compute_endpoint/version.py git commit -m 'Bump versions for dev' git push ``` diff --git a/changelog.d/scriv.ini b/changelog.d/scriv.ini index b26804c74..d271f728c 100644 --- a/changelog.d/scriv.ini +++ b/changelog.d/scriv.ini @@ -2,8 +2,8 @@ format = rst rst_header_chars = -^ output_file = docs/changelog.rst -entry_title_template = funcx & funcx-endpoint v{{ version }} -version = literal: funcx_sdk/funcx/version.py: __version__ +entry_title_template = globus-compute-sdk & globus-compute-endpoint v{{ version }} +version = literal: compute_sdk/globus_compute_sdk/version.py: __version__ # compare against scriv default: # categories = Removed, Added, Changed, Deprecated, Fixed, Security diff --git a/funcx_endpoint/LICENSE b/compute_endpoint/LICENSE similarity index 100% rename from funcx_endpoint/LICENSE rename to compute_endpoint/LICENSE diff --git a/compute_endpoint/globus_compute_endpoint/__init__.py b/compute_endpoint/globus_compute_endpoint/__init__.py new file mode 100644 index 000000000..795ff5865 --- /dev/null +++ b/compute_endpoint/globus_compute_endpoint/__init__.py @@ -0,0 +1,4 @@ +from globus_compute_endpoint.version import __version__ as _version + +__author__ = "The Globus Compute Team" +__version__ = _version diff --git a/funcx_endpoint/funcx_endpoint/cli.py b/compute_endpoint/globus_compute_endpoint/cli.py similarity index 66% rename from funcx_endpoint/funcx_endpoint/cli.py rename to compute_endpoint/globus_compute_endpoint/cli.py index 6dabfee22..34392b001 100644 --- a/funcx_endpoint/funcx_endpoint/cli.py +++ b/compute_endpoint/globus_compute_endpoint/cli.py @@ -1,21 +1,24 @@ from __future__ import annotations +import difflib import importlib.util import json import logging import pathlib +import shutil import sys +import uuid import click from click import ClickException - -from funcx.sdk.login_manager import LoginManager -from funcx.sdk.login_manager.whoami import print_whoami_info +from globus_compute_sdk.sdk.login_manager import LoginManager +from globus_compute_sdk.sdk.login_manager.whoami import print_whoami_info from .endpoint.endpoint import Endpoint from .endpoint.endpoint_manager import EndpointManager from .endpoint.utils.config import Config from .logging_config import setup_logging +from .version import DEPRECATION_FUNCX_ENDPOINT log = logging.getLogger(__name__) @@ -34,23 +37,22 @@ def ensure(cls) -> CommandState: return click.get_current_context().ensure_object(CommandState) -def init_endpoint_configuration_dir(funcx_conf_dir: pathlib.Path): - if not funcx_conf_dir.exists(): - log.info( - "No existing configuration found at %s. Initializing...", funcx_conf_dir - ) +def init_endpoint_configuration_dir(conf_dir: pathlib.Path): + if not conf_dir.exists(): + log.info("No existing configuration found at %s. 
Initializing...", conf_dir) try: - funcx_conf_dir.mkdir(mode=0o700, exist_ok=True) + conf_dir.mkdir(mode=0o700, exist_ok=True) except Exception as exc: e = click.ClickException( f"{exc}\n\nUnable to create configuration directory" ) raise e from exc - elif not funcx_conf_dir.is_dir(): + elif not conf_dir.is_dir(): raise click.ClickException( - f"File already exists: {funcx_conf_dir}\n\n" - "Refusing to initialize funcX configuration directory: path already exists" + f"File already exists: {conf_dir}\n\n" + "Refusing to initialize Globus Compute configuration directory: " + "path already exists" ) @@ -65,8 +67,8 @@ def get_cli_endpoint() -> Endpoint: # as a result, any number of CLI options may be used to tweak the CommandState # via callbacks, and the Endpoint will only be constructed within commands which # access the Endpoint via this getter - funcx_dir = get_config_dir() - init_endpoint_configuration_dir(funcx_dir) + conf_dir = get_config_dir() + init_endpoint_configuration_dir(conf_dir) state = CommandState.ensure() endpoint = Endpoint(debug=state.debug) @@ -146,7 +148,7 @@ def config_dir_callback(ctx, param, value): state.endpoint_config_dir = value -@click.group("funcx-endpoint") +@click.group("globus-compute-endpoint") @click.option( "-c", "--config-dir", @@ -164,10 +166,10 @@ def app(): @app.command("version") @common_options def version_command(): - """Show the version of funcx-endpoint""" - import funcx_endpoint + """Show the version of globus-compute-endpoint""" + import globus_compute_endpoint as gce - click.echo(f"FuncX endpoint version: {funcx_endpoint.__version__}") + click.echo(f"Globus Compute endpoint version: {gce.__version__}") @app.command(name="configure", help="Configure an endpoint") @@ -192,8 +194,8 @@ def configure_endpoint( Drops a config.py template into the funcx configs directory. The template usually goes to ~/.funcx//config.py """ - funcx_dir = get_config_dir() - ep_dir = funcx_dir / name + compute_dir = get_config_dir() + ep_dir = compute_dir / name Endpoint.configure_endpoint(ep_dir, endpoint_config, multi_tenant) @@ -271,8 +273,8 @@ def _do_logout_endpoints( Returns False, error_msg if token revocation was not done """ if running_endpoints is None: - funcx_dir = get_config_dir() - running_endpoints = Endpoint.get_running_endpoints(funcx_dir) + compute_dir = get_config_dir() + running_endpoints = Endpoint.get_running_endpoints(compute_dir) tokens_revoked = False error_msg = None if running_endpoints and not force: @@ -293,6 +295,82 @@ def _do_logout_endpoints( return tokens_revoked, error_msg +FUNCX_COMPUTE_IMPORT_UPDATES = { + "from funcx_endpoint.endpoint.utils.config": "from globus_compute_endpoint.endpoint.utils.config", # noqa E501 + "from funcx_endpoint.executors": "from globus_compute_endpoint.executors", # noqa E501 +} + + +def _upgrade_funcx_imports_in_config(name: str, force=False) -> str: + """ + This only modifies unindented import lines, as are in the original + config.py. Indented matching lines are user created and would have to be + updated manually by the user. 
+ + The force flag will overwrite an existing (non-directory) config.py.bak + + This method uses a randomly generated intermediate output file in case + there are any permission or unforeseen file system issues + """ + ep_dir = get_config_dir() / name + config_path = ep_dir / "config.py" + config_backup = ep_dir / "config.py.bak" + + try: + config_text = config_path.read_text() + upd_config_text = config_text + + for original, repl in FUNCX_COMPUTE_IMPORT_UPDATES.items(): + upd_config_text = upd_config_text.replace(original, repl) + + if upd_config_text == config_text: + return f"No funcX import statements found in config.py for {name}" + + change_diff = "".join( + difflib.unified_diff( + config_text.splitlines(keepends=True), + upd_config_text.splitlines(keepends=True), + n=3, # Typical 3 lines of context + ) + ) + + if config_backup.exists() and not force: + msg = ( + f"{config_backup} already exists.\n" + "Rename it or use the --force flag to update config." + ) + raise ClickException(msg) + elif config_backup.is_dir(): + msg = ( + f"{config_backup} is a directory.\n" + "Rename it before proceeding with config update." + ) + raise ClickException(msg) + + # Write to temporary file in case of issues + tmp_output_path = ep_dir / ("config.py." + uuid.uuid4().hex) + tmp_output_path.write_text(upd_config_text) + + # Rename files last, as it's the least likely to err + config_backup.unlink(missing_ok=True) + shutil.move(config_path, config_backup) # Preserve file timestamp + shutil.move(tmp_output_path, config_path) + + return ( + f"Applied following diff for endpoint {name}:\n{change_diff}\n\n" + f" The previous config has been renamed to {config_backup}" + ) + + except FileNotFoundError as err: + msg = f"No config.py was found for endpoint ({name}) in {ep_dir}" + raise ClickException(msg) from err + except ClickException: + raise + except Exception as err: + msg = f"Unknown Exception {err} attempting to reformat config.py in {ep_dir}" + raise ClickException(msg) from err + + def read_config(endpoint_dir: pathlib.Path) -> Config: endpoint_name = endpoint_dir.name @@ -309,7 +387,7 @@ def read_config(endpoint_dir: pathlib.Path) -> Config: except FileNotFoundError as err: if not endpoint_dir.exists(): - configure_command = "funcx-endpoint configure" + configure_command = "globus-compute-endpoint configure" if endpoint_name != "default": configure_command += f" {endpoint_name}" msg = ( @@ -336,10 +414,32 @@ def read_config(endpoint_dir: pathlib.Path) -> Config: ) raise ClickException(msg) from err + except ModuleNotFoundError as err: + # Catch specific error when old config.py references funcx_endpoint + if "No module named 'funcx_endpoint'" in err.msg: + msg = ( + f"{conf_path} contains import statements from a previously " + "configured endpoint that uses the (deprecated) " + "funcx-endpoint library. Please update the imports to reference " + "globus_compute_endpoint.\n\ni.e.\n" + " from funcx_endpoint.endpoint.utils.config -> " + "from globus_compute_endpoint.endpoint.utils.config\n" + " from funcx_endpoint.executors -> " + "from globus_compute_endpoint.executors\n" + "\n" + "You can also use the command " + "`globus-compute-endpoint update_funcx_config [endpoint_name]` " + "to update them\n" + ) + raise ClickException(msg) from err + else: + log.exception(err.msg) + raise + except Exception: log.exception( - "funcX v0.2.0 made several non-backwards compatible changes to the config. " - "Your config might be out of date. 
" + "Globus Compute v0.2.0 made several non-backwards compatible changes to " + "the config. Your config might be out of date. " "Refer to " "https://funcx.readthedocs.io/en/latest/endpoints.html#configuring-funcx" ) @@ -406,6 +506,23 @@ def stop_endpoint(*, name: str, remote: bool): _do_stop_endpoint(name=name, remote=remote) +@app.command("update_funcx_config") +@name_arg +@click.option( + "--force", + is_flag=True, + default=False, + help="update config and backup to config.py.bak even if it already exists", +) +def update_funcx_endpoint_config(*, name: str, force: bool): + """ + Update imports file from funcx_endpoint.* to globus_compute_endpoint.* + + Either should raise ClickException or returns modification result message + """ + print(_upgrade_funcx_imports_in_config(name=name, force=force)) + + def _do_stop_endpoint(*, name: str, remote: bool = False) -> None: ep_dir = get_config_dir() / name Endpoint.stop_endpoint(ep_dir, read_config(ep_dir), remote=remote) @@ -430,8 +547,8 @@ def restart_endpoint(*, name: str, **_kwargs): @common_options def list_endpoints(): """List all available endpoints""" - funcx_dir = get_config_dir() - Endpoint.print_endpoint_table(funcx_dir) + compute_dir = get_config_dir() + Endpoint.print_endpoint_table(compute_dir) @app.command("delete") @@ -462,5 +579,21 @@ def cli_run(): app() +def cli_run_funcx(): + """Entry point that prints a custom message. i.e. deprecation warnings""" + fmt = DEPRECATION_FUNCX_ENDPOINT + + # Colorized notice to be a bit more visible + fmt = "{title}DEPRECATION NOTICE{rs}\n{body}" + fmt + "{rs}" + title = frs = body = rs = "" + if sys.stderr.isatty(): + title = "\033[37;41m" # White FG, Red BG + body = "\033[33m" # Yellow FG + rs = "\033[0m" # Reset colors + + print(fmt.format(title=title, body=body, frs=frs, rs=rs), file=sys.stderr) + app() + + if __name__ == "__main__": app() diff --git a/funcx_endpoint/funcx_endpoint/endpoint/README.rst b/compute_endpoint/globus_compute_endpoint/endpoint/README.rst similarity index 87% rename from funcx_endpoint/funcx_endpoint/endpoint/README.rst rename to compute_endpoint/globus_compute_endpoint/endpoint/README.rst index 0da07eae0..065095c96 100644 --- a/funcx_endpoint/funcx_endpoint/endpoint/README.rst +++ b/compute_endpoint/globus_compute_endpoint/endpoint/README.rst @@ -1,7 +1,7 @@ Endpoint UX-Design ------------------ -Here are the basic functions we want from the `funcx-endpoint`: +Here are the basic functions we want from the `globus-compute-endpoint`: * init : One time initialization that sets up the global options for the site at which the endpoint is being deployed diff --git a/funcx_endpoint/funcx_endpoint/endpoint/__init__.py b/compute_endpoint/globus_compute_endpoint/endpoint/__init__.py similarity index 100% rename from funcx_endpoint/funcx_endpoint/endpoint/__init__.py rename to compute_endpoint/globus_compute_endpoint/endpoint/__init__.py diff --git a/funcx_endpoint/funcx_endpoint/endpoint/config.py b/compute_endpoint/globus_compute_endpoint/endpoint/config.py similarity index 100% rename from funcx_endpoint/funcx_endpoint/endpoint/config.py rename to compute_endpoint/globus_compute_endpoint/endpoint/config.py diff --git a/funcx_endpoint/funcx_endpoint/endpoint/default_config.py b/compute_endpoint/globus_compute_endpoint/endpoint/default_config.py similarity index 81% rename from funcx_endpoint/funcx_endpoint/endpoint/default_config.py rename to compute_endpoint/globus_compute_endpoint/endpoint/default_config.py index 81ce7de7a..8b51a19ba 100644 --- 
a/funcx_endpoint/funcx_endpoint/endpoint/default_config.py +++ b/compute_endpoint/globus_compute_endpoint/endpoint/default_config.py @@ -1,8 +1,7 @@ +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.executors import HighThroughputExecutor from parsl.providers import LocalProvider -from funcx_endpoint.endpoint.utils.config import Config -from funcx_endpoint.executors import HighThroughputExecutor - config = Config( executors=[ HighThroughputExecutor( diff --git a/funcx_endpoint/funcx_endpoint/endpoint/endpoint.py b/compute_endpoint/globus_compute_endpoint/endpoint/endpoint.py similarity index 93% rename from funcx_endpoint/funcx_endpoint/endpoint/endpoint.py rename to compute_endpoint/globus_compute_endpoint/endpoint/endpoint.py index 6bb15eda1..c1bae07c7 100644 --- a/funcx_endpoint/funcx_endpoint/endpoint/endpoint.py +++ b/compute_endpoint/globus_compute_endpoint/endpoint/endpoint.py @@ -19,17 +19,16 @@ import psutil import setproctitle import texttable +from globus_compute_endpoint import __version__ +from globus_compute_endpoint.endpoint import default_config as endpoint_default_config +from globus_compute_endpoint.endpoint.interchange import EndpointInterchange +from globus_compute_endpoint.endpoint.result_store import ResultStore +from globus_compute_endpoint.endpoint.utils import _redact_url_creds +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.logging_config import setup_logging +from globus_compute_sdk.sdk.client import Client from globus_sdk import GlobusAPIError, NetworkError -from funcx.sdk.client import FuncXClient -from funcx_endpoint import __version__ -from funcx_endpoint.endpoint import default_config as endpoint_default_config -from funcx_endpoint.endpoint.interchange import EndpointInterchange -from funcx_endpoint.endpoint.result_store import ResultStore -from funcx_endpoint.endpoint.utils import _redact_url_creds -from funcx_endpoint.endpoint.utils.config import Config -from funcx_endpoint.logging_config import setup_logging - log = logging.getLogger(__name__) @@ -87,7 +86,7 @@ def init_endpoint_dir( :param endpoint_dir pathlib.Path Path to the endpoint configuration dir :param endpoint_config str Path to a config file to be used instead - of the funcX default config file + of the Globus Compute default config file :param multi_tenant bool Whether the endpoint is a multi-user endpoint """ log.debug(f"Creating endpoint dir {endpoint_dir}") @@ -133,7 +132,7 @@ def configure_endpoint( print( f"\n Configuration file: {config_path}\n" "\nUse the `start` subcommand to run it:\n" - f"\n $ funcx-endpoint start {ep_name}" + f"\n $ globus-compute-endpoint start {ep_name}" ) @staticmethod @@ -155,16 +154,16 @@ def get_or_create_endpoint_uuid( return ep_id @staticmethod - def get_funcx_client(config: Config | None) -> FuncXClient: + def get_funcx_client(config: Config | None) -> Client: if config: funcx_client_options = { "funcx_service_address": config.funcx_service_address, "environment": config.environment, } - return FuncXClient(**funcx_client_options) + return Client(**funcx_client_options) else: - return FuncXClient() + return Client() def start_endpoint( self, @@ -261,10 +260,10 @@ def start_endpoint( f"Caught exception while attempting endpoint registration: {e}" ) log.critical( - "funcx-endpoint is unable to reach the funcX service due to a " - "NetworkError \n" - "Please make sure that the funcX service address you provided is " - "reachable \n" + "globus-compute-endpoint is unable 
to reach the Globus Compute " + "service due to a NetworkError \n" + "Please make sure that the Globus Compute service address you " + "provided is reachable \n" "and then attempt restarting the endpoint" ) exit(os.EX_TEMPFAIL) @@ -290,12 +289,12 @@ def start_endpoint( json_file = endpoint_dir / "endpoint.json" # `endpoint_id` key kept for backward compatibility when - # funcx-endpoint list is called + # globus-compute-endpoint list is called ep_info = {"endpoint_id": endpoint_uuid} json_file.write_text(json.dumps(ep_info)) log.debug(f"Registration info written to {json_file}") - ptitle = f"funcX Endpoint ({endpoint_uuid}, {endpoint_dir.name})" + ptitle = f"Globus Compute Endpoint ({endpoint_uuid}, {endpoint_dir.name})" if endpoint_config.environment: ptitle += f" - {endpoint_config.environment}" ptitle += f" [{setproctitle.getproctitle()}]" @@ -472,10 +471,10 @@ def delete_endpoint( ) if not force: log.critical( - "funcx-endpoint is unable to reach the funcX service due to a " - "NetworkError \n" - "Please make sure that the funcX service address you provided is " - "reachable \n" + "globus-compute-endpoint is unable to reach the Globus Compute " + "service due to a NetworkError \n" + "Please make sure that the Globus Compute service address you " + "provided is reachable \n" "and then attempt to delete the endpoint again" ) exit(os.EX_TEMPFAIL) @@ -590,7 +589,8 @@ def print_endpoint_table(conf_dir: pathlib.Path, ofile=None): endpoints = Endpoint.get_endpoints(conf_dir) if not endpoints: print( - "No endpoints configured!\n\n (Hint: funcx-endpoint configure)", + "No endpoints configured!\n\n " + "(Hint: globus-compute-endpoint configure)", file=ofile, ) return diff --git a/funcx_endpoint/funcx_endpoint/endpoint/endpoint_manager.py b/compute_endpoint/globus_compute_endpoint/endpoint/endpoint_manager.py similarity index 96% rename from funcx_endpoint/funcx_endpoint/endpoint/endpoint_manager.py rename to compute_endpoint/globus_compute_endpoint/endpoint/endpoint_manager.py index f06fc5f32..e00d51278 100644 --- a/funcx_endpoint/funcx_endpoint/endpoint/endpoint_manager.py +++ b/compute_endpoint/globus_compute_endpoint/endpoint/endpoint_manager.py @@ -16,17 +16,16 @@ import typing as t from datetime import datetime +import globus_compute_sdk as gc import setproctitle -from globus_sdk import GlobusAPIError, NetworkError - -import funcx -from funcx_endpoint import __version__ -from funcx_endpoint.endpoint.endpoint import Endpoint -from funcx_endpoint.endpoint.rabbit_mq.command_queue_subscriber import ( +from globus_compute_endpoint import __version__ +from globus_compute_endpoint.endpoint.endpoint import Endpoint +from globus_compute_endpoint.endpoint.rabbit_mq.command_queue_subscriber import ( CommandQueueSubscriber, ) -from funcx_endpoint.endpoint.utils import _redact_url_creds -from funcx_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.endpoint.utils import _redact_url_creds +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_sdk import GlobusAPIError, NetworkError if t.TYPE_CHECKING: from pika.spec import BasicProperties @@ -63,13 +62,13 @@ def __init__( endpoint_uuid = Endpoint.get_or_create_endpoint_uuid(conf_dir, endpoint_uuid) try: - funcx_client_options = { + client_options = { "funcx_service_address": config.funcx_service_address, "environment": config.environment, } - fxc = funcx.FuncXClient(**funcx_client_options) - reg_info = fxc.register_endpoint( + gcc = gc.Client(**client_options) + reg_info = gcc.register_endpoint( 
conf_dir.name, endpoint_uuid, metadata=EndpointManager.get_metadata(config), @@ -114,13 +113,13 @@ def __init__( json_file = conf_dir / "endpoint.json" # `endpoint_id` key kept for backward compatibility when - # funcx-endpoint list is called + # globus-compute-endpoint list is called ep_info = {"endpoint_id": endpoint_uuid} json_file.write_text(json.dumps(ep_info)) log.debug(f"Registration info written to {json_file}") # * == "multi-tenant"; not important until it is, so let it be subtle - ptitle = f"funcX Endpoint *({endpoint_uuid}, {conf_dir.name})" + ptitle = f"Globus Compute Endpoint *({endpoint_uuid}, {conf_dir.name})" if config.environment: ptitle += f" - {config.environment}" ptitle += f" [{setproctitle.getproctitle()}]" @@ -381,7 +380,13 @@ def cmd_start_endpoint( if not ep_name: raise InvalidCommandError("Missing endpoint name") - proc_args = ["funcx-endpoint", "start", ep_name, "--die-with-parent", *args] + proc_args = [ + "globus-compute-endpoint", + "start", + ep_name, + "--die-with-parent", + *args, + ] pw_rec = pwd.getpwnam(local_username) udir, uid, gid = pw_rec.pw_dir, pw_rec.pw_uid, pw_rec.pw_gid diff --git a/funcx_endpoint/funcx_endpoint/endpoint/interchange.py b/compute_endpoint/globus_compute_endpoint/endpoint/interchange.py similarity index 92% rename from funcx_endpoint/funcx_endpoint/endpoint/interchange.py rename to compute_endpoint/globus_compute_endpoint/endpoint/interchange.py index ae0d86c8d..7a1461884 100644 --- a/funcx_endpoint/funcx_endpoint/endpoint/interchange.py +++ b/compute_endpoint/globus_compute_endpoint/endpoint/interchange.py @@ -16,28 +16,33 @@ # see: https://github.com/python/typeshed/issues/4266 from multiprocessing.synchronize import Event as EventType +import globus_compute_endpoint.endpoint.utils.config import pika.exceptions import setproctitle -from funcx_common.messagepack import InvalidMessageError, pack, unpack -from funcx_common.messagepack.message_types import ( +from globus_compute_common.messagepack import InvalidMessageError, pack, unpack +from globus_compute_common.messagepack.message_types import ( EPStatusReport, Result, ResultErrorDetails, Task, ) -from parsl.version import VERSION as PARSL_VERSION - -import funcx_endpoint.endpoint.utils.config -from funcx import __version__ as funcx_sdk_version -from funcx_endpoint import __version__ as funcx_endpoint_version -from funcx_endpoint.endpoint.messages_compat import ( +from globus_compute_endpoint import __version__ as funcx_endpoint_version +from globus_compute_endpoint.endpoint.messages_compat import ( convert_to_internaltask, try_convert_to_messagepack, ) -from funcx_endpoint.endpoint.rabbit_mq import ResultQueuePublisher, TaskQueueSubscriber -from funcx_endpoint.endpoint.result_store import ResultStore -from funcx_endpoint.exception_handling import get_error_string, get_result_error_details -from funcx_endpoint.executors.high_throughput.mac_safe_queue import mpQueue +from globus_compute_endpoint.endpoint.rabbit_mq import ( + ResultQueuePublisher, + TaskQueueSubscriber, +) +from globus_compute_endpoint.endpoint.result_store import ResultStore +from globus_compute_endpoint.exception_handling import ( + get_error_string, + get_result_error_details, +) +from globus_compute_endpoint.executors.high_throughput.mac_safe_queue import mpQueue +from globus_compute_sdk import __version__ as funcx_sdk_version +from parsl.version import VERSION as PARSL_VERSION log = logging.getLogger(__name__) @@ -56,7 +61,7 @@ class EndpointInterchange: def __init__( self, - config: 
funcx_endpoint.endpoint.utils.config.Config,
+        config: globus_compute_endpoint.endpoint.utils.config.Config,
         reg_info: dict[str, dict],
         logdir=".",
         endpoint_id=None,
@@ -68,14 +73,14 @@ def __init__(
         """
         Parameters
         ----------
-        config : funcx.Config object
-             Funcx config object that describes how compute should be provisioned
+        config : globus_compute_sdk.Config object
+             Globus Compute config object describing how compute should be provisioned
 
         reg_info : dict[str, dict]
             Dictionary containing connection information for both the task and
            result queues. The required data structure is returned from the
            Endpoint registration API call, encapsulated in the SDK by
-            `FuncXClient.register_endpoint()`.
+            `Client.register_endpoint()`.
 
         logdir : str
             Parsl log directory paths. Logs and temp files go here. Default: '.'
@@ -143,7 +148,9 @@ def load_config(self):
         log.info("Loading endpoint local config")
         self.results_passthrough = mpQueue()
 
-        self.executors: dict[str, funcx_endpoint.executors.HighThroughputExecutor] = {}
+        self.executors: dict[
+            str, globus_compute_endpoint.executors.HighThroughputExecutor
+        ] = {}
         for executor in self.config.executors:
             log.info(f"Initializing executor: {executor.label}")
             executor.funcx_service_address = self.config.funcx_service_address
@@ -331,9 +338,9 @@ def _main_loop(self):
         """
         This is the "kernel" of the endpoint interchange process. Conceptually,
         there are three actions of consequence: forward task messages to the executors,
-        forward results from the executors back to the funcx web services (RMQ), and
-        forward any previous results that may have failed to send previously (e.g., if
-        a RMQ connection was dropped).
+        forward results from the executors back to the Globus Compute web services
+        (RMQ), and forward any previous results that may have failed to send previously
+        (e.g., if a RMQ connection was dropped).
 
         We accomplish this via three threads, one each for each task.
 
@@ -375,8 +382,8 @@ def process_stored_results():
 
         def process_pending_tasks():
             # Pull tasks from upstream (RMQ) and send them down the ZMQ pipe to the
-            # funcx-manager. In terms of shutting down (or "rebooting") gracefully,
-            # iterate once a second -- regardless of whether a task has arrived.
+            # globus-compute-manager. In terms of shutting down (or "rebooting")
+            # gracefully, iterate once a second whether or not a task has arrived.
             nonlocal num_tasks_forwarded
             ctype = executor.container_type
             while not self._quiesce_event.is_set():
@@ -414,9 +421,10 @@ def process_pending_tasks():
             log.debug("Exit process-pending-tasks thread.")
 
         def process_pending_results():
-            # Forward incoming results from the funcx-manager to the funcx-services.
-            # For graceful handling of shutdown (or "reboot"), wait up to a second
-            # for incoming results before iterating the loop regardless.
+            # Forward incoming results from the globus-compute-manager to the
+            # Globus Compute services. For graceful handling of shutdown
+            # (or "reboot"), wait up to a second for incoming results before
+            # iterating the loop regardless.
nonlocal num_results_forwarded while not self._quiesce_event.is_set(): try: diff --git a/funcx_endpoint/funcx_endpoint/endpoint/messages_compat.py b/compute_endpoint/globus_compute_endpoint/endpoint/messages_compat.py similarity index 76% rename from funcx_endpoint/funcx_endpoint/endpoint/messages_compat.py rename to compute_endpoint/globus_compute_endpoint/endpoint/messages_compat.py index 5797c0d24..377b61769 100644 --- a/funcx_endpoint/funcx_endpoint/endpoint/messages_compat.py +++ b/compute_endpoint/globus_compute_endpoint/endpoint/messages_compat.py @@ -4,22 +4,23 @@ import pickle import uuid -from funcx_common.messagepack import Message as OutgoingMessage -from funcx_common.messagepack import pack -from funcx_common.messagepack.message_types import ( +from globus_compute_common.messagepack import Message as OutgoingMessage +from globus_compute_common.messagepack import pack +from globus_compute_common.messagepack.message_types import ( EPStatusReport as OutgoingEPStatusReport, ) -from funcx_common.messagepack.message_types import Result as OutgoingResult -from funcx_common.messagepack.message_types import ( +from globus_compute_common.messagepack.message_types import Result as OutgoingResult +from globus_compute_common.messagepack.message_types import ( ResultErrorDetails as OutgoingResultErrorDetails, ) -from funcx_common.messagepack.message_types import Task as OutgoingTask -from funcx_common.messagepack.message_types import TaskTransition - -from funcx_endpoint.executors.high_throughput.messages import ( +from globus_compute_common.messagepack.message_types import Task as OutgoingTask +from globus_compute_common.messagepack.message_types import TaskTransition +from globus_compute_endpoint.executors.high_throughput.messages import ( EPStatusReport as InternalEPStatusReport, ) -from funcx_endpoint.executors.high_throughput.messages import Task as InternalTask +from globus_compute_endpoint.executors.high_throughput.messages import ( + Task as InternalTask, +) logger = logging.getLogger(__name__) diff --git a/funcx_endpoint/funcx_endpoint/endpoint/rabbit_mq/README.rst b/compute_endpoint/globus_compute_endpoint/endpoint/rabbit_mq/README.rst similarity index 100% rename from funcx_endpoint/funcx_endpoint/endpoint/rabbit_mq/README.rst rename to compute_endpoint/globus_compute_endpoint/endpoint/rabbit_mq/README.rst diff --git a/funcx_endpoint/funcx_endpoint/endpoint/rabbit_mq/__init__.py b/compute_endpoint/globus_compute_endpoint/endpoint/rabbit_mq/__init__.py similarity index 100% rename from funcx_endpoint/funcx_endpoint/endpoint/rabbit_mq/__init__.py rename to compute_endpoint/globus_compute_endpoint/endpoint/rabbit_mq/__init__.py diff --git a/funcx_endpoint/funcx_endpoint/endpoint/rabbit_mq/base.py b/compute_endpoint/globus_compute_endpoint/endpoint/rabbit_mq/base.py similarity index 100% rename from funcx_endpoint/funcx_endpoint/endpoint/rabbit_mq/base.py rename to compute_endpoint/globus_compute_endpoint/endpoint/rabbit_mq/base.py diff --git a/funcx_endpoint/funcx_endpoint/endpoint/rabbit_mq/command_queue_subscriber.py b/compute_endpoint/globus_compute_endpoint/endpoint/rabbit_mq/command_queue_subscriber.py similarity index 100% rename from funcx_endpoint/funcx_endpoint/endpoint/rabbit_mq/command_queue_subscriber.py rename to compute_endpoint/globus_compute_endpoint/endpoint/rabbit_mq/command_queue_subscriber.py diff --git a/funcx_endpoint/funcx_endpoint/endpoint/rabbit_mq/result_queue_publisher.py 
b/compute_endpoint/globus_compute_endpoint/endpoint/rabbit_mq/result_queue_publisher.py
similarity index 100%
rename from funcx_endpoint/funcx_endpoint/endpoint/rabbit_mq/result_queue_publisher.py
rename to compute_endpoint/globus_compute_endpoint/endpoint/rabbit_mq/result_queue_publisher.py
diff --git a/funcx_endpoint/funcx_endpoint/endpoint/rabbit_mq/task_queue_subscriber.py b/compute_endpoint/globus_compute_endpoint/endpoint/rabbit_mq/task_queue_subscriber.py
similarity index 100%
rename from funcx_endpoint/funcx_endpoint/endpoint/rabbit_mq/task_queue_subscriber.py
rename to compute_endpoint/globus_compute_endpoint/endpoint/rabbit_mq/task_queue_subscriber.py
diff --git a/funcx_endpoint/funcx_endpoint/endpoint/result_store.py b/compute_endpoint/globus_compute_endpoint/endpoint/result_store.py
similarity index 100%
rename from funcx_endpoint/funcx_endpoint/endpoint/result_store.py
rename to compute_endpoint/globus_compute_endpoint/endpoint/result_store.py
diff --git a/funcx_endpoint/funcx_endpoint/endpoint/taskqueue.py b/compute_endpoint/globus_compute_endpoint/endpoint/taskqueue.py
similarity index 100%
rename from funcx_endpoint/funcx_endpoint/endpoint/taskqueue.py
rename to compute_endpoint/globus_compute_endpoint/endpoint/taskqueue.py
diff --git a/funcx_endpoint/funcx_endpoint/endpoint/utils/__init__.py b/compute_endpoint/globus_compute_endpoint/endpoint/utils/__init__.py
similarity index 100%
rename from funcx_endpoint/funcx_endpoint/endpoint/utils/__init__.py
rename to compute_endpoint/globus_compute_endpoint/endpoint/utils/__init__.py
diff --git a/funcx_endpoint/funcx_endpoint/endpoint/utils/config.py b/compute_endpoint/globus_compute_endpoint/endpoint/utils/config.py
similarity index 90%
rename from funcx_endpoint/funcx_endpoint/endpoint/utils/config.py
rename to compute_endpoint/globus_compute_endpoint/endpoint/utils/config.py
index 6fa669933..fc8adbcef 100644
--- a/funcx_endpoint/funcx_endpoint/endpoint/utils/config.py
+++ b/compute_endpoint/globus_compute_endpoint/endpoint/utils/config.py
@@ -3,15 +3,14 @@
 import inspect
 import warnings
 
+from globus_compute_endpoint.executors import HighThroughputExecutor
 from parsl.utils import RepresentationMixin
 
-from funcx_endpoint.executors import HighThroughputExecutor
-
 _DEFAULT_EXECUTORS = [HighThroughputExecutor()]
 
 
 class Config(RepresentationMixin):
-    """Specification of FuncX configuration options.
+    """Specification of Globus Compute configuration options.
 
     Parameters
     ----------
@@ -28,11 +27,13 @@ class Config(RepresentationMixin):
         Default: None
 
     funcx_service_address: str | None
-        URL address string of the funcX service to which the Endpoint should connect.
+        URL address string of the Globus Compute service to which the Endpoint
+        should connect.
         Default: None
 
     results_ws_uri: str | None
-        URL address string of the funcX websocket service passed to the funcX client.
+        URL address string of the Globus Compute websocket service passed to the
+        Globus Compute client.
         Default: None
 
     warn_about_url_mismatch: Bool
@@ -42,12 +43,12 @@
 
     heartbeat_period: int (seconds)
         The interval at which heartbeat messages are sent from the endpoint to the
-        funcx-web-service
+        Globus Compute web service
         Default: 30s
 
     heartbeat_threshold: int (seconds)
-        Seconds since the last hearbeat message from the funcx-web-service after which
-        the connection is assumed to be disconnected.
+        Seconds since the last heartbeat message from the Globus Compute web service
+        after which the connection is assumed to be disconnected.
Default: 120s idle_heartbeats_soft: int (count) diff --git a/funcx_endpoint/funcx_endpoint/exception_handling.py b/compute_endpoint/globus_compute_endpoint/exception_handling.py similarity index 92% rename from funcx_endpoint/funcx_endpoint/exception_handling.py rename to compute_endpoint/globus_compute_endpoint/exception_handling.py index a2404f9e5..8c2be26e2 100644 --- a/funcx_endpoint/funcx_endpoint/exception_handling.py +++ b/compute_endpoint/globus_compute_endpoint/exception_handling.py @@ -14,8 +14,8 @@ import types import typing as t -from funcx.errors import MaxResultSizeExceeded -from funcx_endpoint.exceptions import CouldNotExecuteUserTaskError +from globus_compute_endpoint.exceptions import CouldNotExecuteUserTaskError +from globus_compute_sdk.errors import MaxResultSizeExceeded INTERNAL_ERROR_CLASSES: tuple[type[Exception], ...] = ( CouldNotExecuteUserTaskError, diff --git a/funcx_endpoint/funcx_endpoint/exceptions.py b/compute_endpoint/globus_compute_endpoint/exceptions.py similarity index 100% rename from funcx_endpoint/funcx_endpoint/exceptions.py rename to compute_endpoint/globus_compute_endpoint/exceptions.py diff --git a/compute_endpoint/globus_compute_endpoint/executors/__init__.py b/compute_endpoint/globus_compute_endpoint/executors/__init__.py new file mode 100644 index 000000000..87abeee88 --- /dev/null +++ b/compute_endpoint/globus_compute_endpoint/executors/__init__.py @@ -0,0 +1,5 @@ +from globus_compute_endpoint.executors.high_throughput.executor import ( + HighThroughputExecutor, +) + +__all__ = ["HighThroughputExecutor"] diff --git a/funcx_endpoint/funcx_endpoint/executors/high_throughput/__init__.py b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/__init__.py similarity index 100% rename from funcx_endpoint/funcx_endpoint/executors/high_throughput/__init__.py rename to compute_endpoint/globus_compute_endpoint/executors/high_throughput/__init__.py diff --git a/funcx_endpoint/funcx_endpoint/executors/high_throughput/container_sched.py b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/container_sched.py similarity index 93% rename from funcx_endpoint/funcx_endpoint/executors/high_throughput/container_sched.py rename to compute_endpoint/globus_compute_endpoint/executors/high_throughput/container_sched.py index 5c5db78c9..937cd8270 100644 --- a/funcx_endpoint/funcx_endpoint/executors/high_throughput/container_sched.py +++ b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/container_sched.py @@ -2,9 +2,9 @@ import math import random -from funcx_endpoint.logging_config import FXLogger +from globus_compute_endpoint.logging_config import ComputeLogger -log: FXLogger = logging.getLogger(__name__) # type: ignore +log: ComputeLogger = logging.getLogger(__name__) # type: ignore def naive_scheduler( diff --git a/funcx_endpoint/funcx_endpoint/executors/high_throughput/executor.py b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/executor.py similarity index 98% rename from funcx_endpoint/funcx_endpoint/executors/high_throughput/executor.py rename to compute_endpoint/globus_compute_endpoint/executors/high_throughput/executor.py index b04f31f9f..07cd4c959 100644 --- a/funcx_endpoint/funcx_endpoint/executors/high_throughput/executor.py +++ b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/executor.py @@ -18,25 +18,24 @@ import daemon import dill -from parsl.dataflow.error import ConfigurationError -from parsl.executors.errors import BadMessage, ScalingFailed -from parsl.providers 
import LocalProvider -from parsl.utils import RepresentationMixin - -from funcx.serialize import FuncXSerializer -from funcx_endpoint.executors.high_throughput import interchange, zmq_pipes -from funcx_endpoint.executors.high_throughput.mac_safe_queue import mpQueue -from funcx_endpoint.executors.high_throughput.messages import ( +from globus_compute_endpoint.executors.high_throughput import interchange, zmq_pipes +from globus_compute_endpoint.executors.high_throughput.mac_safe_queue import mpQueue +from globus_compute_endpoint.executors.high_throughput.messages import ( EPStatusReport, Heartbeat, HeartbeatReq, Task, TaskCancel, ) -from funcx_endpoint.logging_config import setup_logging -from funcx_endpoint.strategies.simple import SimpleStrategy +from globus_compute_endpoint.logging_config import setup_logging +from globus_compute_endpoint.strategies.simple import SimpleStrategy +from globus_compute_sdk.serialize import ComputeSerializer +from parsl.dataflow.error import ConfigurationError +from parsl.executors.errors import BadMessage, ScalingFailed +from parsl.providers import LocalProvider +from parsl.utils import RepresentationMixin -fx_serializer = FuncXSerializer() +fx_serializer = ComputeSerializer() # TODO: YADU There's a bug here which causes some of the log messages to write out to @@ -335,7 +334,7 @@ def __init__( f"to accelerators: {self.available_accelerators}" ) - # FuncX specific options + # Globus Compute specific options self.container_image = container_image self.worker_mode = worker_mode diff --git a/funcx_endpoint/funcx_endpoint/executors/high_throughput/interchange.py b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/interchange.py similarity index 98% rename from funcx_endpoint/funcx_endpoint/executors/high_throughput/interchange.py rename to compute_endpoint/globus_compute_endpoint/executors/high_throughput/interchange.py index 3a0d34aa4..5aab39b8f 100644 --- a/funcx_endpoint/funcx_endpoint/executors/high_throughput/interchange.py +++ b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/interchange.py @@ -16,28 +16,30 @@ import daemon import dill import zmq -from funcx_common.messagepack.message_types import TaskTransition -from funcx_common.tasks import ActorName, TaskState -from parsl.version import VERSION as PARSL_VERSION - -from funcx.serialize import FuncXSerializer -from funcx_endpoint.exception_handling import get_error_string, get_result_error_details -from funcx_endpoint.executors.high_throughput.interchange_task_dispatch import ( +from globus_compute_common.messagepack.message_types import TaskTransition +from globus_compute_common.tasks import ActorName, TaskState +from globus_compute_endpoint.exception_handling import ( + get_error_string, + get_result_error_details, +) +from globus_compute_endpoint.executors.high_throughput.interchange_task_dispatch import ( # noqa: E501 naive_interchange_task_dispatch, ) -from funcx_endpoint.executors.high_throughput.messages import ( +from globus_compute_endpoint.executors.high_throughput.messages import ( BadCommand, EPStatusReport, Heartbeat, Message, MessageType, ) -from funcx_endpoint.logging_config import FXLogger +from globus_compute_endpoint.logging_config import ComputeLogger +from globus_compute_sdk.serialize import ComputeSerializer +from parsl.version import VERSION as PARSL_VERSION if t.TYPE_CHECKING: import multiprocessing as mp -log: FXLogger = logging.getLogger(__name__) # type: ignore +log: ComputeLogger = logging.getLogger(__name__) # type: ignore HEARTBEAT_CODE = 
(2**32) - 1 PKL_HEARTBEAT_CODE = dill.dumps(HEARTBEAT_CODE) @@ -123,8 +125,9 @@ def __init__( """ Parameters ---------- - config : funcx.Config object - Funcx config object that describes how compute should be provisioned + config : globus_compute_sdk.Config object + Globus Compute config object that describes how compute should + be provisioned client_address : str The ip address at which the parsl client can be reached. @@ -217,7 +220,7 @@ def __init__( # initialize the last heartbeat time to start the loop self.last_heartbeat = time.time() - self.serializer = FuncXSerializer() + self.serializer = ComputeSerializer() log.info( "Attempting connection to forwarder at {} on ports: {},{},{}".format( client_address, client_ports[0], client_ports[1], client_ports[2] @@ -297,7 +300,7 @@ def __init__( self.last_core_hr_counter = 0 if not launch_cmd: self.launch_cmd = ( - "funcx-manager {debug} {max_workers} " + "globus-compute-manager {debug} {max_workers} " "-c {cores_per_worker} " "--poll {poll_period} " "--task_url={task_url} " @@ -1159,7 +1162,7 @@ def starter(comm_q: mp.Queue, *args, **kwargs) -> None: def cli_run(): - from funcx_endpoint.logging_config import setup_logging + from globus_compute_endpoint.logging_config import setup_logging parser = argparse.ArgumentParser() parser.add_argument("-c", "--client_address", required=True, help="Client address") diff --git a/funcx_endpoint/funcx_endpoint/executors/high_throughput/interchange_task_dispatch.py b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/interchange_task_dispatch.py similarity index 98% rename from funcx_endpoint/funcx_endpoint/executors/high_throughput/interchange_task_dispatch.py rename to compute_endpoint/globus_compute_endpoint/executors/high_throughput/interchange_task_dispatch.py index 5c30e5b4e..dd3c57489 100644 --- a/funcx_endpoint/funcx_endpoint/executors/high_throughput/interchange_task_dispatch.py +++ b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/interchange_task_dispatch.py @@ -5,9 +5,9 @@ import queue import random -from funcx_endpoint.logging_config import FXLogger +from globus_compute_endpoint.logging_config import ComputeLogger -log: FXLogger = logging.getLogger(__name__) # type: ignore +log: ComputeLogger = logging.getLogger(__name__) # type: ignore log.info("Interchange task dispatch started") diff --git a/funcx_endpoint/funcx_endpoint/executors/high_throughput/mac_safe_queue.py b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/mac_safe_queue.py similarity index 100% rename from funcx_endpoint/funcx_endpoint/executors/high_throughput/mac_safe_queue.py rename to compute_endpoint/globus_compute_endpoint/executors/high_throughput/mac_safe_queue.py diff --git a/funcx_endpoint/funcx_endpoint/executors/high_throughput/funcx_manager.py b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/manager.py similarity index 97% rename from funcx_endpoint/funcx_endpoint/executors/high_throughput/funcx_manager.py rename to compute_endpoint/globus_compute_endpoint/executors/high_throughput/manager.py index ff6837cff..5c1b39e62 100755 --- a/funcx_endpoint/funcx_endpoint/executors/high_throughput/funcx_manager.py +++ b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/manager.py @@ -20,27 +20,31 @@ import dill import psutil import zmq -from funcx_common.messagepack.message_types import TaskTransition -from funcx_common.tasks import ActorName, TaskState -from parsl.version import VERSION as PARSL_VERSION - -from funcx.serialize import 
FuncXSerializer -from funcx_endpoint.exception_handling import get_error_string, get_result_error_details -from funcx_endpoint.executors.high_throughput.container_sched import naive_scheduler -from funcx_endpoint.executors.high_throughput.mac_safe_queue import mpQueue -from funcx_endpoint.executors.high_throughput.messages import ( +from globus_compute_common.messagepack.message_types import TaskTransition +from globus_compute_common.tasks import ActorName, TaskState +from globus_compute_endpoint.exception_handling import ( + get_error_string, + get_result_error_details, +) +from globus_compute_endpoint.executors.high_throughput.container_sched import ( + naive_scheduler, +) +from globus_compute_endpoint.executors.high_throughput.mac_safe_queue import mpQueue +from globus_compute_endpoint.executors.high_throughput.messages import ( ManagerStatusReport, Message, Task, ) -from funcx_endpoint.executors.high_throughput.worker_map import WorkerMap -from funcx_endpoint.logging_config import FXLogger, setup_logging +from globus_compute_endpoint.executors.high_throughput.worker_map import WorkerMap +from globus_compute_endpoint.logging_config import ComputeLogger, setup_logging +from globus_compute_sdk.serialize import ComputeSerializer +from parsl.version import VERSION as PARSL_VERSION RESULT_TAG = 10 TASK_REQUEST_TAG = 11 HEARTBEAT_CODE = (2**32) - 1 -log: FXLogger = logging.getLogger(__name__) # type: ignore +log: ComputeLogger = logging.getLogger(__name__) # type: ignore class TaskCancelled(Exception): @@ -237,7 +241,7 @@ def __init__( self.heartbeat_period = heartbeat_period self.heartbeat_threshold = heartbeat_threshold self.poll_period = poll_period - self.serializer = FuncXSerializer() + self.serializer = ComputeSerializer() self.next_worker_q: list[str] = [] # FIFO queue for spinning up workers. 
self.worker_procs: dict[str, subprocess.Popen] = {} diff --git a/funcx_endpoint/funcx_endpoint/executors/high_throughput/messages.py b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/messages.py similarity index 98% rename from funcx_endpoint/funcx_endpoint/executors/high_throughput/messages.py rename to compute_endpoint/globus_compute_endpoint/executors/high_throughput/messages.py index 0f212da2e..f7b6be5bc 100644 --- a/funcx_endpoint/funcx_endpoint/executors/high_throughput/messages.py +++ b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/messages.py @@ -7,7 +7,7 @@ from enum import Enum, auto from struct import Struct -from funcx_common.messagepack.message_types import TaskTransition +from globus_compute_common.messagepack.message_types import TaskTransition MESSAGE_TYPE_FORMATTER = Struct("b") @@ -103,7 +103,7 @@ def pack(self) -> bytes: # rather than thinking hard, preserve the exact current runtime behavior # # all of this code is going to be eliminated soonish by - # funcx_common.messagepack in part because of issues like this + # globus_compute_common.messagepack in part because of issues like this add_ons = ( f"TID={self.task_id};CID={self.container_id};" # type: ignore f"{self.task_buffer}" diff --git a/funcx_endpoint/funcx_endpoint/executors/high_throughput/funcx_worker.py b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/worker.py similarity index 90% rename from funcx_endpoint/funcx_endpoint/executors/high_throughput/funcx_worker.py rename to compute_endpoint/globus_compute_endpoint/executors/high_throughput/worker.py index 0b8952290..739d1640c 100644 --- a/funcx_endpoint/funcx_endpoint/executors/high_throughput/funcx_worker.py +++ b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/worker.py @@ -9,16 +9,18 @@ import dill import zmq -from funcx_common import messagepack -from funcx_common.messagepack.message_types import TaskTransition -from funcx_common.tasks import ActorName, TaskState - -from funcx.errors import MaxResultSizeExceeded -from funcx.serialize import FuncXSerializer -from funcx_endpoint.exception_handling import get_error_string, get_result_error_details -from funcx_endpoint.exceptions import CouldNotExecuteUserTaskError -from funcx_endpoint.executors.high_throughput.messages import Message -from funcx_endpoint.logging_config import setup_logging +from globus_compute_common import messagepack +from globus_compute_common.messagepack.message_types import TaskTransition +from globus_compute_common.tasks import ActorName, TaskState +from globus_compute_endpoint.exception_handling import ( + get_error_string, + get_result_error_details, +) +from globus_compute_endpoint.exceptions import CouldNotExecuteUserTaskError +from globus_compute_endpoint.executors.high_throughput.messages import Message +from globus_compute_endpoint.logging_config import setup_logging +from globus_compute_sdk.errors import MaxResultSizeExceeded +from globus_compute_sdk.serialize import ComputeSerializer log = logging.getLogger(__name__) @@ -26,8 +28,8 @@ DEFAULT_RESULT_SIZE_LIMIT_B = DEFAULT_RESULT_SIZE_LIMIT_MB * 1024 * 1024 -class FuncXWorker: - """The FuncX worker +class Worker: + """The Globus Compute worker Parameters ---------- @@ -44,7 +46,7 @@ class FuncXWorker: Maximum result size allowed in Bytes Default = 10 MB - Funcx worker will use the REP sockets to: + Globus Compute worker will use the REP sockets to: task = recv () result = execute(task) send(result) @@ -62,7 +64,7 @@ def __init__( self.address = address 
self.port = port self.worker_type = worker_type - self.serializer = FuncXSerializer() + self.serializer = ComputeSerializer() self.serialize = self.serializer.serialize self.deserialize = self.serializer.deserialize self.result_size_limit = result_size_limit @@ -230,7 +232,7 @@ def cli_run(): sys.stderr = fe try: - worker = FuncXWorker( + worker = Worker( args.worker_id, args.address, int(args.port), diff --git a/funcx_endpoint/funcx_endpoint/executors/high_throughput/worker_map.py b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/worker_map.py similarity index 98% rename from funcx_endpoint/funcx_endpoint/executors/high_throughput/worker_map.py rename to compute_endpoint/globus_compute_endpoint/executors/high_throughput/worker_map.py index b80803b3d..069baaa86 100644 --- a/funcx_endpoint/funcx_endpoint/executors/high_throughput/worker_map.py +++ b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/worker_map.py @@ -8,9 +8,9 @@ from queue import Empty, Queue from typing import Any -from funcx_endpoint.logging_config import FXLogger +from globus_compute_endpoint.logging_config import ComputeLogger -log: FXLogger = logging.getLogger(__name__) # type: ignore +log: ComputeLogger = logging.getLogger(__name__) # type: ignore class WorkerMap: @@ -321,7 +321,7 @@ def add_worker( self.worker_id_counter += 1 cmd = ( - f"funcx-worker {debug}{worker_id} " + f"globus-compute-worker {debug}{worker_id} " f"-a {address} " f"-p {worker_port} " f"-t {worker_type} " diff --git a/funcx_endpoint/funcx_endpoint/executors/high_throughput/zmq_pipes.py b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/zmq_pipes.py similarity index 98% rename from funcx_endpoint/funcx_endpoint/executors/high_throughput/zmq_pipes.py rename to compute_endpoint/globus_compute_endpoint/executors/high_throughput/zmq_pipes.py index 435b02019..144863094 100644 --- a/funcx_endpoint/funcx_endpoint/executors/high_throughput/zmq_pipes.py +++ b/compute_endpoint/globus_compute_endpoint/executors/high_throughput/zmq_pipes.py @@ -5,8 +5,7 @@ import dill import zmq - -from funcx_endpoint.executors.high_throughput.messages import Message +from globus_compute_endpoint.executors.high_throughput.messages import Message log = logging.getLogger(__name__) diff --git a/funcx_endpoint/funcx_endpoint/logging_config.py b/compute_endpoint/globus_compute_endpoint/logging_config.py similarity index 91% rename from funcx_endpoint/funcx_endpoint/logging_config.py rename to compute_endpoint/globus_compute_endpoint/logging_config.py index 5510254dd..b18d4ec00 100644 --- a/funcx_endpoint/funcx_endpoint/logging_config.py +++ b/compute_endpoint/globus_compute_endpoint/logging_config.py @@ -1,5 +1,5 @@ """ -This module contains logging configuration for the funcx-endpoint application. +This module contains logging configuration for the globus-compute-endpoint application. """ from __future__ import annotations @@ -47,7 +47,7 @@ C_DEBUG_FMT = _C_BASE + f" {COLOR_DEBUG}%(message)s{_r}" -class FuncxConsoleFormatter(logging.Formatter): +class ComputeConsoleFormatter(logging.Formatter): """ For internal use only. 
This formatter handles output to standard streams in the following way: @@ -158,7 +158,7 @@ def _get_file_dict_config( "version": 1, "formatters": { "streamfmt": { - "()": "funcx_endpoint.logging_config.FuncxConsoleFormatter", + "()": "globus_compute_endpoint.logging_config.ComputeConsoleFormatter", "debug": debug, "no_color": no_color, }, @@ -183,12 +183,12 @@ def _get_file_dict_config( }, }, "loggers": { - "funcx_endpoint": { + "globus_compute_endpoint": { "level": "DEBUG" if debug else "INFO", "handlers": log_handlers, }, - # configure for the funcx SDK as well - "funcx": { + # configure for the Globus Compute SDK as well + "globus_compute_sdk": { "level": "DEBUG" if debug else "WARNING", "handlers": log_handlers, }, @@ -201,7 +201,7 @@ def _get_stream_dict_config(debug: bool, no_color: bool) -> dict: "version": 1, "formatters": { "streamfmt": { - "()": "funcx_endpoint.logging_config.FuncxConsoleFormatter", + "()": "globus_compute_endpoint.logging_config.ComputeConsoleFormatter", "debug": debug, "no_color": no_color, }, @@ -214,12 +214,12 @@ def _get_stream_dict_config(debug: bool, no_color: bool) -> dict: } }, "loggers": { - "funcx_endpoint": { + "globus_compute_endpoint": { "level": "DEBUG", "handlers": ["console"], }, - # configure for the funcx SDK as well - "funcx": { + # configure for the Globus Compute SDK as well + "globus_compute_sdk": { "level": "DEBUG" if debug else "WARNING", "handlers": ["console"], }, @@ -227,14 +227,14 @@ def _get_stream_dict_config(debug: bool, no_color: bool) -> dict: } -class FXLogger(logging.Logger): +class ComputeLogger(logging.Logger): TRACE = logging.DEBUG - 5 def trace(self, msg, *args, **kwargs): - self.log(FXLogger.TRACE, msg, args, **kwargs) + self.log(ComputeLogger.TRACE, msg, args, **kwargs) -logging.setLoggerClass(FXLogger) +logging.setLoggerClass(ComputeLogger) logger = logging.getLogger(__name__) diff --git a/compute_endpoint/globus_compute_endpoint/providers/__init__.py b/compute_endpoint/globus_compute_endpoint/providers/__init__.py new file mode 100644 index 000000000..ee8e5b892 --- /dev/null +++ b/compute_endpoint/globus_compute_endpoint/providers/__init__.py @@ -0,0 +1,3 @@ +from globus_compute_endpoint.providers.kubernetes.kube import KubernetesProvider + +__all__ = ["KubernetesProvider"] diff --git a/funcx_endpoint/funcx_endpoint/providers/kubernetes/__init__.py b/compute_endpoint/globus_compute_endpoint/providers/kubernetes/__init__.py similarity index 100% rename from funcx_endpoint/funcx_endpoint/providers/kubernetes/__init__.py rename to compute_endpoint/globus_compute_endpoint/providers/kubernetes/__init__.py diff --git a/funcx_endpoint/funcx_endpoint/providers/kubernetes/kube.py b/compute_endpoint/globus_compute_endpoint/providers/kubernetes/kube.py similarity index 98% rename from funcx_endpoint/funcx_endpoint/providers/kubernetes/kube.py rename to compute_endpoint/globus_compute_endpoint/providers/kubernetes/kube.py index 5447e7319..8ad6ba50f 100644 --- a/funcx_endpoint/funcx_endpoint/providers/kubernetes/kube.py +++ b/compute_endpoint/globus_compute_endpoint/providers/kubernetes/kube.py @@ -4,12 +4,11 @@ from typing import Any, Dict, List, Optional, Tuple import typeguard +from globus_compute_endpoint.providers.kubernetes.template import template_string from parsl.errors import OptionalModuleMissing from parsl.providers.base import ExecutionProvider from parsl.utils import RepresentationMixin -from funcx_endpoint.providers.kubernetes.template import template_string - try: from kubernetes import client, config @@ -142,7 
+141,9 @@ def __init__( # Dictionary that keeps track of jobs, keyed on task_type self.resources_by_task_type: Dict[str, Any] = {} - def submit(self, cmd_string, tasks_per_node, task_type, job_name="funcx-worker"): + def submit( + self, cmd_string, tasks_per_node, task_type, job_name="globus-compute-worker" + ): """Submit a job Args: - cmd_string :(String) - Name of the container to initiate diff --git a/funcx_endpoint/funcx_endpoint/providers/kubernetes/template.py b/compute_endpoint/globus_compute_endpoint/providers/kubernetes/template.py similarity index 100% rename from funcx_endpoint/funcx_endpoint/providers/kubernetes/template.py rename to compute_endpoint/globus_compute_endpoint/providers/kubernetes/template.py diff --git a/compute_endpoint/globus_compute_endpoint/strategies/__init__.py b/compute_endpoint/globus_compute_endpoint/strategies/__init__.py new file mode 100644 index 000000000..d615a47bb --- /dev/null +++ b/compute_endpoint/globus_compute_endpoint/strategies/__init__.py @@ -0,0 +1,5 @@ +from globus_compute_endpoint.strategies.base import BaseStrategy +from globus_compute_endpoint.strategies.kube_simple import KubeSimpleStrategy +from globus_compute_endpoint.strategies.simple import SimpleStrategy + +__all__ = ["BaseStrategy", "SimpleStrategy", "KubeSimpleStrategy"] diff --git a/funcx_endpoint/funcx_endpoint/strategies/base.py b/compute_endpoint/globus_compute_endpoint/strategies/base.py similarity index 98% rename from funcx_endpoint/funcx_endpoint/strategies/base.py rename to compute_endpoint/globus_compute_endpoint/strategies/base.py index 8b9754e83..b5487f2e2 100644 --- a/funcx_endpoint/funcx_endpoint/strategies/base.py +++ b/compute_endpoint/globus_compute_endpoint/strategies/base.py @@ -69,7 +69,8 @@ def start(self, interchange): """Actually start the strategy Parameters ---------- - interchange: funcx.executors.high_throughput.interchange.Interchange + interchange: + globus_compute_endpoint.executors.high_throughput.interchange.Interchange Interchange to bind the strategy to """ self.interchange = interchange diff --git a/funcx_endpoint/funcx_endpoint/strategies/kube_simple.py b/compute_endpoint/globus_compute_endpoint/strategies/kube_simple.py similarity index 96% rename from funcx_endpoint/funcx_endpoint/strategies/kube_simple.py rename to compute_endpoint/globus_compute_endpoint/strategies/kube_simple.py index 5e8fec57e..166e8c7e5 100644 --- a/funcx_endpoint/funcx_endpoint/strategies/kube_simple.py +++ b/compute_endpoint/globus_compute_endpoint/strategies/kube_simple.py @@ -2,10 +2,10 @@ import math import time -from funcx_endpoint.logging_config import FXLogger -from funcx_endpoint.strategies.base import BaseStrategy +from globus_compute_endpoint.logging_config import ComputeLogger +from globus_compute_endpoint.strategies.base import BaseStrategy -log: FXLogger = logging.getLogger(__name__) # type: ignore +log: ComputeLogger = logging.getLogger(__name__) # type: ignore class KubeSimpleStrategy(BaseStrategy): diff --git a/funcx_endpoint/funcx_endpoint/strategies/simple.py b/compute_endpoint/globus_compute_endpoint/strategies/simple.py similarity index 98% rename from funcx_endpoint/funcx_endpoint/strategies/simple.py rename to compute_endpoint/globus_compute_endpoint/strategies/simple.py index 406f157cc..eb361d2b6 100644 --- a/funcx_endpoint/funcx_endpoint/strategies/simple.py +++ b/compute_endpoint/globus_compute_endpoint/strategies/simple.py @@ -4,10 +4,9 @@ import math import time +from globus_compute_endpoint.strategies.base import BaseStrategy from 
parsl.providers.base import JobState -from funcx_endpoint.strategies.base import BaseStrategy - log = logging.getLogger(__name__) diff --git a/funcx_endpoint/funcx_endpoint/strategies/test.py b/compute_endpoint/globus_compute_endpoint/strategies/test.py similarity index 96% rename from funcx_endpoint/funcx_endpoint/strategies/test.py rename to compute_endpoint/globus_compute_endpoint/strategies/test.py index b9543776d..d9a073c67 100644 --- a/funcx_endpoint/funcx_endpoint/strategies/test.py +++ b/compute_endpoint/globus_compute_endpoint/strategies/test.py @@ -1,7 +1,7 @@ import queue import time -from funcx_endpoint.strategies import SimpleStrategy +from globus_compute_endpoint.strategies import SimpleStrategy class MockInterchange: diff --git a/compute_endpoint/globus_compute_endpoint/version.py b/compute_endpoint/globus_compute_endpoint/version.py new file mode 100644 index 000000000..b619dbd2e --- /dev/null +++ b/compute_endpoint/globus_compute_endpoint/version.py @@ -0,0 +1,21 @@ +# single source of truth for package version, +# see https://packaging.python.org/en/latest/single_source_version/ +__version__ = "2.0.0" + +# TODO: remove after a `globus-compute-sdk` release +# this is needed because it's imported by `globus-compute-sdk` to do the version check +VERSION = __version__ + +# Here as it's the easier way for funcx-endpoint cli to display it +DEPRECATION_FUNCX_ENDPOINT = """ +funcX Endpoint has been renamed to Globus Compute Endpoint and the new package +is available on PyPI: + https://pypi.org/project/globus-compute-endpoint/ + +Please consider upgrading to Globus Compute. More information can be found at: + https://globus-compute.readthedocs.io/en/latest/funcx_upgrade.html +""" + + +# app name to send as part of requests +app_name = f"Globus Compute Endpoint v{__version__}" diff --git a/funcx_endpoint/setup.cfg b/compute_endpoint/setup.cfg similarity index 78% rename from funcx_endpoint/setup.cfg rename to compute_endpoint/setup.cfg index f1d2cf498..645a5f071 100644 --- a/funcx_endpoint/setup.cfg +++ b/compute_endpoint/setup.cfg @@ -1,6 +1,6 @@ [isort] profile = black -known_first_party = funcx,funcx_endpoint +known_first_party = globus-compute-sdk, globus-compute-endpoint [flake8] # config to be black-compatible diff --git a/funcx_endpoint/setup.py b/compute_endpoint/setup.py similarity index 68% rename from funcx_endpoint/setup.py rename to compute_endpoint/setup.py index 101f1a080..437889549 100644 --- a/funcx_endpoint/setup.py +++ b/compute_endpoint/setup.py @@ -4,9 +4,9 @@ REQUIRES = [ "requests>=2.20.0,<3", - "globus-sdk", # version will be bounded by `funcx` - "funcx>=1.0.8", - "funcx-common==0.0.25", + "globus-sdk", # version will be bounded by `globus-compute-sdk` + "globus-compute-sdk>=2.0.0", + "globus-compute-common==0.1.0", # table printing used in list-endpoints "texttable>=1.6.4,<2", # although psutil does not declare itself to use semver, it appears to offer @@ -29,7 +29,7 @@ # further investigation may be needed if the issue persists in the next pyzmq # release "pyzmq>=22.0.0,!=22.3.0,<=23.2.0", - # 'parsl' is a core requirement of the funcx-endpoint, essential to a range + # 'parsl' is a core requirement of the globus-compute-endpoint, essential to a range # of different features and functions # pin exact versions because it does not use semver "parsl==2023.1.23", @@ -47,15 +47,15 @@ version_ns = {} -with open(os.path.join("funcx_endpoint", "version.py")) as f: +with open(os.path.join("globus_compute_endpoint", "version.py")) as f: exec(f.read(), version_ns) 
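# (Illustrative aside.) The setup.py hunk here single-sources the package
# version: version.py is executed in a scratch namespace rather than
# imported, so the version is readable at build time, before the package's
# dependencies are installed. The same trick in isolation, assuming the file
# layout introduced by this patch:

    import os

    version_ns: dict = {}
    with open(os.path.join("globus_compute_endpoint", "version.py")) as f:
        exec(f.read(), version_ns)
    print(version_ns["__version__"])  # -> "2.0.0"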
version = version_ns["__version__"] setup( - name="funcx-endpoint", + name="globus-compute-endpoint", version=version, packages=find_packages(), - description="funcX: High Performance Function Serving for Science", + description="Globus Compute: High Performance Function Serving for Science", install_requires=REQUIRES, extras_require={ "test": TEST_REQUIRES, @@ -70,21 +70,21 @@ "Programming Language :: Python :: 3", "Topic :: Scientific/Engineering", ], - keywords=["funcX", "FaaS", "Function Serving"], + keywords=["Globus Compute", "FaaS", "Function Serving"], entry_points={ "console_scripts": [ - "funcx-endpoint=funcx_endpoint.cli:cli_run", - "funcx-interchange" - "=funcx_endpoint.executors.high_throughput.interchange:cli_run", - "funcx-manager" - "=funcx_endpoint.executors.high_throughput.funcx_manager:cli_run", - "funcx-worker" - "=funcx_endpoint.executors.high_throughput.funcx_worker:cli_run", + "globus-compute-endpoint=globus_compute_endpoint.cli:cli_run", + "globus-compute-interchange" + "=globus_compute_endpoint.executors.high_throughput.interchange:cli_run", + "globus-compute-manager" + "=globus_compute_endpoint.executors.high_throughput.manager:cli_run", + "globus-compute-worker" + "=globus_compute_endpoint.executors.high_throughput.worker:cli_run", ] }, include_package_data=True, - author="funcX team", - author_email="labs@globus.org", + author="Globus Compute Team", + author_email="support@globus.org", license="Apache License, Version 2.0", url="https://github.com/funcx-faas/funcx", ) diff --git a/funcx_endpoint/tests/__init__.py b/compute_endpoint/tests/__init__.py similarity index 100% rename from funcx_endpoint/tests/__init__.py rename to compute_endpoint/tests/__init__.py diff --git a/funcx_endpoint/tests/conftest.py b/compute_endpoint/tests/conftest.py similarity index 86% rename from funcx_endpoint/tests/conftest.py rename to compute_endpoint/tests/conftest.py index 4f280f589..ae7f9b96b 100644 --- a/funcx_endpoint/tests/conftest.py +++ b/compute_endpoint/tests/conftest.py @@ -5,12 +5,11 @@ import time import uuid +import globus_compute_sdk as gc import globus_sdk import pytest import responses - -import funcx -from funcx.sdk.web_client import FuncxWebClient +from globus_compute_sdk.sdk.web_client import WebClient @pytest.fixture(scope="session") @@ -43,15 +42,15 @@ def logout(self) -> bool: def get_auth_client(self) -> globus_sdk.AuthClient: return globus_sdk.AuthClient(authorizer=globus_sdk.NullAuthorizer()) - def get_funcx_web_client(self, *, base_url: str | None = None) -> FuncxWebClient: - return FuncxWebClient( + def get_web_client(self, *, base_url: str | None = None) -> WebClient: + return WebClient( base_url="https://api2.funcx.org/v2/", authorizer=globus_sdk.NullAuthorizer(), ) @pytest.fixture -def get_standard_funcx_client(): +def get_standard_compute_client(): responses.add( method=responses.GET, url="https://api2.funcx.org/v2/version", @@ -60,7 +59,7 @@ def get_standard_funcx_client(): ) def func(): - return funcx.FuncXClient( + return gc.Client( login_manager=FakeLoginManager(), do_version_check=False, ) diff --git a/funcx_endpoint/tests/integration/conftest.py b/compute_endpoint/tests/integration/conftest.py similarity index 99% rename from funcx_endpoint/tests/integration/conftest.py rename to compute_endpoint/tests/integration/conftest.py index d63862f0b..e75ee2ea2 100644 --- a/funcx_endpoint/tests/integration/conftest.py +++ b/compute_endpoint/tests/integration/conftest.py @@ -13,18 +13,17 @@ import pika import pika.exceptions import pytest +from 
globus_compute_endpoint.endpoint.rabbit_mq import ( + RabbitPublisherStatus, + ResultQueuePublisher, + TaskQueueSubscriber, +) from pika.exchange_type import ExchangeType from tests.integration.test_rabbit_mq.result_queue_subscriber import ( ResultQueueSubscriber, ) from tests.integration.test_rabbit_mq.task_queue_publisher import TaskQueuePublisher -from funcx_endpoint.endpoint.rabbit_mq import ( - RabbitPublisherStatus, - ResultQueuePublisher, - TaskQueueSubscriber, -) - @pytest.fixture(scope="session") def rabbitmq_conn_url(): diff --git a/funcx_endpoint/tests/integration/funcx_endpoint/conftest.py b/compute_endpoint/tests/integration/endpoint/conftest.py similarity index 95% rename from funcx_endpoint/tests/integration/funcx_endpoint/conftest.py rename to compute_endpoint/tests/integration/endpoint/conftest.py index 41aa14cf9..54e780582 100644 --- a/funcx_endpoint/tests/integration/funcx_endpoint/conftest.py +++ b/compute_endpoint/tests/integration/endpoint/conftest.py @@ -24,7 +24,7 @@ def fake_setup_logging(*args, **kwargs): pass monkeypatch.setattr( - "funcx_endpoint.logging_config.setup_logging", fake_setup_logging + "globus_compute_endpoint.logging_config.setup_logging", fake_setup_logging ) diff --git a/compute_endpoint/tests/integration/endpoint/endpoint/test_endpoint.py b/compute_endpoint/tests/integration/endpoint/endpoint/test_endpoint.py new file mode 100644 index 000000000..c56d04207 --- /dev/null +++ b/compute_endpoint/tests/integration/endpoint/endpoint/test_endpoint.py @@ -0,0 +1,256 @@ +import os.path +import pathlib +import uuid +from unittest.mock import Mock, patch + +import globus_compute_sdk.sdk.client +import globus_compute_sdk.sdk.login_manager +import pytest +import responses +from click import ClickException +from click.testing import CliRunner +from globus_compute_endpoint.cli import ( + _do_logout_endpoints, + _do_stop_endpoint, + _upgrade_funcx_imports_in_config, + app, +) +from globus_compute_endpoint.endpoint import endpoint +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_sdk.sdk.web_client import WebClient + + +@pytest.fixture(autouse=True) +def patch_funcx_client(mocker): + return mocker.patch("globus_compute_endpoint.endpoint.endpoint.Client") + + +def test_non_configured_endpoint(mocker): + result = CliRunner().invoke(app, ["start", "newendpoint"]) + assert "newendpoint" in result.stdout + assert "not configured" in result.stdout + + +@pytest.mark.parametrize("status_code", [409, 410, 423]) +@responses.activate +def test_start_endpoint_blocked( + mocker, fs, randomstring, patch_funcx_client, status_code +): + # happy-path tested in tests/unit/test_endpoint_unit.py + + fx_addy = "http://api.funcx/" + gcc = globus_compute_sdk.Client( + funcx_service_address=fx_addy, + do_version_check=False, + login_manager=mocker.Mock(), + ) + fxwc = WebClient(base_url=fx_addy) + gcc.web_client = fxwc + patch_funcx_client.return_value = gcc + + mock_log = mocker.patch("globus_compute_endpoint.endpoint.endpoint.log") + reason_msg = randomstring() + responses.add( + responses.GET, + fx_addy + "version", + json={"api": "1.0.5", "min_ep_version": "1.0.5", "min_sdk_version": "0.0.2a0"}, + status=200, + ) + responses.add( + responses.POST, + fx_addy + "endpoints", + json={"reason": reason_msg}, + status=status_code, + ) + + ep_dir = pathlib.Path("/some/path/some_endpoint_name") + ep_dir.mkdir(parents=True, exist_ok=True) + + ep_id = str(uuid.uuid4()) + log_to_console = False + no_color = True + ep_conf = Config() + + ep = 
endpoint.Endpoint() + with pytest.raises(SystemExit): + ep.start_endpoint(ep_dir, ep_id, ep_conf, log_to_console, no_color, reg_info={}) + args, kwargs = mock_log.warning.call_args + assert "blocked" in args[0] + assert reason_msg in args[0] + + +def test_endpoint_logout(monkeypatch): + # not forced, and no running endpoints + logout_true = Mock(return_value=True) + logout_false = Mock(return_value=False) + monkeypatch.setattr( + globus_compute_sdk.sdk.login_manager.LoginManager, "logout", logout_true + ) + success, msg = _do_logout_endpoints( + False, + running_endpoints={}, + ) + logout_true.assert_called_once() + assert success + + logout_true.reset_mock() + + # forced, and no running endpoints + success, msg = _do_logout_endpoints( + True, + running_endpoints={}, + ) + logout_true.assert_called_once() + assert success + + one_running = { + "default": {"status": "Running", "id": "123abcde-a393-4456-8de5-123456789abc"} + } + + monkeypatch.setattr( + globus_compute_sdk.sdk.login_manager.LoginManager, "logout", logout_false + ) + # not forced, with running endpoint + success, msg = _do_logout_endpoints(False, running_endpoints=one_running) + logout_false.assert_not_called() + assert not success + + logout_true.reset_mock() + + monkeypatch.setattr( + globus_compute_sdk.sdk.login_manager.LoginManager, "logout", logout_true + ) + # forced, with running endpoint + success, msg = _do_logout_endpoints(True, running_endpoints=one_running) + logout_true.assert_called_once() + assert success + + +@patch( + "globus_compute_endpoint.endpoint.endpoint.Endpoint.get_endpoint_id", + return_value="abc-uuid", +) +@patch( + "globus_compute_endpoint.cli.get_config_dir", + return_value=pathlib.Path("some_ep_dir"), +) +@patch("globus_compute_endpoint.cli.read_config") +@patch("globus_compute_endpoint.endpoint.endpoint.Client.stop_endpoint") +def test_stop_remote_endpoint( + mock_get_id, mock_get_conf, mock_get_gcc, mock_stop_endpoint +): + _do_stop_endpoint(name="abc-endpoint", remote=False) + assert not mock_stop_endpoint.called + _do_stop_endpoint(name="abc-endpoint", remote=True) + assert mock_stop_endpoint.called + + +@patch( + "globus_compute_endpoint.endpoint.endpoint.Endpoint.get_endpoint_id", + return_value="abc-uuid", +) +@patch( + "globus_compute_endpoint.cli.get_config_dir", + return_value=pathlib.Path(), +) +@pytest.mark.parametrize( + "cur_config", + [ + [ + ("abc\n" "bcd" "cef"), + False, + False, + True, + False, + ], + [ + ("abc\n" "bcd" "cef"), + False, + False, + True, + True, + ], + [ + ( + "from funcx_endpoint.endpoint.utils.config import Config\n" + "from funcx_endpoint.executors import HighThroughputExecutor\n" + "from parsl.providers import LocalProvider\n" + "\n" + "config = Config(\n" + " executors=[\n" + " HighThroughputExecutor(\n" + " provider=LocalProvider(\n" + " init_blocks=1,\n" + " min_blocks=0,\n" + " max_blocks=1,\n" + "),\n" + ), + False, + True, + False, + False, + ], + [ + ( + "from funcx_endpoint.endpoint.utils.config import Config\n" + "from funcx_endpoint.executors import HighThroughputExecutor\n" + "from parsl.providers import LocalProvider\n" + ), + False, + True, + False, + True, + ], + [ + ( + "from funcx_endpoint.endpoint.utils.config import Config\n" + "from funcx_endpoint.executors import HighThroughputExecutor\n" + "from parsl.providers import LocalProvider\n" + ), + False, + True, + True, + True, + ], + [ + ( + "def abc():" + " from funcx_endpoint.endpoint.utils.config import Config\n" + " from funcx_endpoint.executors import HighThroughputExecutor\n" + " from 
parsl.providers import LocalProvider\n" + " return 'hello'\n" + ), + False, + False, + False, + False, + ], + ], +) +def test_endpoint_update_funcx(mock_get_id, mock_get_conf, fs, cur_config): + file_content, should_raise, modified, has_bak, do_force = cur_config + ep_dir = pathlib.Path("some_ep_dir") + ep_dir.mkdir(parents=True, exist_ok=True) + with open(ep_dir / "config.py", "w") as f: + f.write(file_content) + if has_bak: + with open(ep_dir / "config.py.bak", "w") as f: + f.write("old backup data\n") + + try: + msg = _upgrade_funcx_imports_in_config("some_ep_dir", force=do_force) + assert not should_raise + if modified: + assert "lines were modified" in msg + assert os.path.exists(ep_dir / "config.py.bak") + else: + assert "No funcX import statements" in msg + with open(ep_dir / "config.py") as f: + for line in f.readlines(): + assert not line.startswith("from funcx_endpoint.") + except ClickException as e: + if should_raise: + if has_bak and not do_force: + assert "Rename it or use" in str(e) + else: + assert AssertionError(f"Unexpected exception: {e}") diff --git a/funcx_endpoint/tests/integration/funcx_endpoint/endpoint/test_endpoint_manager.py b/compute_endpoint/tests/integration/endpoint/endpoint/test_endpoint_manager.py similarity index 91% rename from funcx_endpoint/tests/integration/funcx_endpoint/endpoint/test_endpoint_manager.py rename to compute_endpoint/tests/integration/endpoint/endpoint/test_endpoint_manager.py index 67ce936e0..19eca6f32 100644 --- a/funcx_endpoint/tests/integration/funcx_endpoint/endpoint/test_endpoint_manager.py +++ b/compute_endpoint/tests/integration/endpoint/endpoint/test_endpoint_manager.py @@ -9,11 +9,10 @@ import pytest import requests +from globus_compute_endpoint.endpoint import default_config +from globus_compute_endpoint.endpoint.endpoint import Endpoint from globus_sdk import GlobusAPIError -from funcx_endpoint.endpoint import default_config -from funcx_endpoint.endpoint.endpoint import Endpoint - logger = logging.getLogger("mock_funcx") @@ -96,7 +95,7 @@ def test_configure_multi_tenant(self, mt): "This test needs to be re-written after endpoint_register is updated" ) def test_start(self, mocker): - mock_client = mocker.patch("funcx_endpoint.endpoint.endpoint.FuncXClient") + mock_client = mocker.patch("globus_compute_endpoint.endpoint.endpoint.Client") reg_info = { "endpoint_id": "abcde12345", "address": "localhost", @@ -122,16 +121,16 @@ def test_start(self, mocker): mock_daemon = mocker.patch.object(Endpoint, "daemon_launch", return_value=None) - mock_uuid = mocker.patch("funcx_endpoint.endpoint.endpoint.uuid.uuid4") + mock_uuid = mocker.patch("globus_compute_endpoint.endpoint.endpoint.uuid.uuid4") mock_uuid.return_value = 123456 mock_pidfile = mocker.patch( - "funcx_endpoint.endpoint.endpoint.daemon.pidfile.PIDLockFile" + "globus_compute_endpoint.endpoint.endpoint.daemon.pidfile.PIDLockFile" ) mock_pidfile.return_value = None mock_results_ack_handler = mocker.patch( - "funcx_endpoint.endpoint.endpoint.ResultsAckHandler" + "globus_compute_endpoint.endpoint.endpoint.ResultsAckHandler" ) manager = Endpoint(funcx_dir=os.getcwd()) @@ -181,10 +180,10 @@ def test_start_registration_error(self, mocker): being asserted against because this zmq setup happens before registration occurs. 
""" - mocker.patch("funcx_endpoint.endpoint.endpoint.FuncXClient") + mocker.patch("globus_compute_endpoint.endpoint.endpoint.Client") mock_register_endpoint = mocker.patch( - "funcx_endpoint.endpoint.endpoint.register_endpoint" + "globus_compute_endpoint.endpoint.endpoint.register_endpoint" ) mock_register_endpoint.side_effect = GlobusAPIError( _fake_http_response(status=400, method="POST") @@ -198,15 +197,15 @@ def test_start_registration_error(self, mocker): return_value=(b"12345abcde", b"12345abcde"), ) - mock_uuid = mocker.patch("funcx_endpoint.endpoint.endpoint.uuid.uuid4") + mock_uuid = mocker.patch("globus_compute_endpoint.endpoint.endpoint.uuid.uuid4") mock_uuid.return_value = 123456 mock_pidfile = mocker.patch( - "funcx_endpoint.endpoint.endpoint.daemon.pidfile.PIDLockFile" + "globus_compute_endpoint.endpoint.endpoint.daemon.pidfile.PIDLockFile" ) mock_pidfile.return_value = None - mocker.patch("funcx_endpoint.endpoint.endpoint.ResultsAckHandler") + mocker.patch("globus_compute_endpoint.endpoint.endpoint.ResultsAckHandler") manager = Endpoint(funcx_dir=os.getcwd()) config_dir = os.path.join(manager.funcx_dir, "mock_endpoint") @@ -239,10 +238,10 @@ def test_start_registration_5xx_error(self, mocker): own. mock_zmq_create and mock_zmq_load are being asserted against because this zmq setup happens before registration occurs. """ - mocker.patch("funcx_endpoint.endpoint.endpoint.FuncXClient") + mocker.patch("globus_compute_endpoint.endpoint.endpoint.Client") mock_register_endpoint = mocker.patch( - "funcx_endpoint.endpoint.endpoint.register_endpoint" + "globus_compute_endpoint.endpoint.endpoint.register_endpoint" ) mock_register_endpoint.side_effect = GlobusAPIError( _fake_http_response(status=500, method="POST") @@ -266,16 +265,16 @@ def test_start_registration_5xx_error(self, mocker): mock_daemon = mocker.patch.object(Endpoint, "daemon_launch", return_value=None) - mock_uuid = mocker.patch("funcx_endpoint.endpoint.endpoint.uuid.uuid4") + mock_uuid = mocker.patch("globus_compute_endpoint.endpoint.endpoint.uuid.uuid4") mock_uuid.return_value = 123456 mock_pidfile = mocker.patch( - "funcx_endpoint.endpoint.endpoint.daemon.pidfile.PIDLockFile" + "globus_compute_endpoint.endpoint.endpoint.daemon.pidfile.PIDLockFile" ) mock_pidfile.return_value = None mock_results_ack_handler = mocker.patch( - "funcx_endpoint.endpoint.endpoint.ResultsAckHandler" + "globus_compute_endpoint.endpoint.endpoint.ResultsAckHandler" ) manager = Endpoint(funcx_dir=os.getcwd()) @@ -320,7 +319,7 @@ def test_start_registration_5xx_error(self, mocker): ) def test_start_without_executors(self, mocker): - mock_client = mocker.patch("funcx_endpoint.endpoint.endpoint.FuncXClient") + mock_client = mocker.patch("globus_compute_endpoint.endpoint.endpoint.Client") mock_client.return_value.register_endpoint.return_value = { "endpoint_id": "abcde12345", "address": "localhost", @@ -358,7 +357,7 @@ class mock_executors: @pytest.mark.skip("This test doesn't make much sense") def test_daemon_launch(self, mocker): mock_interchange = mocker.patch( - "funcx_endpoint.endpoint.endpoint.EndpointInterchange" + "globus_compute_endpoint.endpoint.endpoint.EndpointInterchange" ) mock_interchange.return_value.start.return_value = None mock_interchange.return_value.stop.return_value = None @@ -401,7 +400,7 @@ def test_daemon_launch(self, mocker): ) def test_with_funcx_config(self, mocker): mock_interchange = mocker.patch( - "funcx_endpoint.endpoint.interchange.EndpointInterchange" + 
"globus_compute_endpoint.endpoint.interchange.EndpointInterchange" ) mock_interchange.return_value.start.return_value = None mock_interchange.return_value.stop.return_value = None @@ -448,7 +447,7 @@ def test_with_funcx_config(self, mocker): ) def test_get_or_create_endpoint_uuid_no_json_no_uuid(self, mocker): - mock_uuid = mocker.patch("funcx_endpoint.endpoint.endpoint.uuid.uuid4") + mock_uuid = mocker.patch("globus_compute_endpoint.endpoint.endpoint.uuid.uuid4") mock_uuid.return_value = 123456 config_dir = pathlib.Path("/some/path/mock_endpoint") @@ -481,7 +480,7 @@ def test_delete_endpoint(self, mocker, dir_exists, web_svc_ok, force): config_dir = pathlib.Path("/some/path/mock_endpoint") ep_uuid_str = str(uuid.uuid4()) - mock_client = mocker.patch("funcx_endpoint.endpoint.endpoint.FuncXClient") + mock_client = mocker.patch("globus_compute_endpoint.endpoint.endpoint.Client") mock_stop_endpoint = mocker.patch.object(Endpoint, "stop_endpoint") mock_rmtree = mocker.patch.object(shutil, "rmtree") mocker.patch.object(Endpoint, "get_endpoint_id", return_value=ep_uuid_str) diff --git a/funcx_endpoint/tests/integration/funcx_endpoint/endpoint/test_interchange.py b/compute_endpoint/tests/integration/endpoint/endpoint/test_interchange.py similarity index 95% rename from funcx_endpoint/tests/integration/funcx_endpoint/endpoint/test_interchange.py rename to compute_endpoint/tests/integration/endpoint/endpoint/test_interchange.py index beb086b6e..db5d3d07a 100644 --- a/funcx_endpoint/tests/integration/funcx_endpoint/endpoint/test_interchange.py +++ b/compute_endpoint/tests/integration/endpoint/endpoint/test_interchange.py @@ -5,15 +5,14 @@ from importlib.machinery import SourceFileLoader import pytest -from funcx_common.messagepack import pack, unpack -from funcx_common.messagepack.message_types import EPStatusReport, Result, Task +from globus_compute_common.messagepack import pack, unpack +from globus_compute_common.messagepack.message_types import EPStatusReport, Result, Task +from globus_compute_endpoint.endpoint.endpoint import Endpoint +from globus_compute_endpoint.endpoint.interchange import EndpointInterchange, log +from globus_compute_endpoint.endpoint.utils.config import Config from tests.utils import try_for_timeout -from funcx_endpoint.endpoint.endpoint import Endpoint -from funcx_endpoint.endpoint.interchange import EndpointInterchange, log -from funcx_endpoint.endpoint.utils.config import Config - -_MOCK_BASE = "funcx_endpoint.endpoint.interchange." +_MOCK_BASE = "globus_compute_endpoint.endpoint.interchange." 
@pytest.fixture diff --git a/funcx_endpoint/tests/integration/funcx_endpoint/endpoint/test_interchange_with_rabbit.py b/compute_endpoint/tests/integration/endpoint/endpoint/test_interchange_with_rabbit.py similarity index 88% rename from funcx_endpoint/tests/integration/funcx_endpoint/endpoint/test_interchange_with_rabbit.py rename to compute_endpoint/tests/integration/endpoint/endpoint/test_interchange_with_rabbit.py index d1d492883..3c7c34369 100644 --- a/funcx_endpoint/tests/integration/funcx_endpoint/endpoint/test_interchange_with_rabbit.py +++ b/compute_endpoint/tests/integration/endpoint/endpoint/test_interchange_with_rabbit.py @@ -8,18 +8,17 @@ import dill import pika import pytest -from funcx_common.messagepack import pack -from funcx_common.messagepack.message_types import Result, Task -from tests.integration.funcx_endpoint.executors.mock_executors import MockExecutor +from globus_compute_common.messagepack import pack +from globus_compute_common.messagepack.message_types import Result, Task +from globus_compute_endpoint.endpoint.interchange import EndpointInterchange +from globus_compute_endpoint.endpoint.utils.config import Config +from tests.integration.endpoint.executors.mock_executors import MockExecutor from tests.utils import try_for_timeout -from funcx_endpoint.endpoint.interchange import EndpointInterchange -from funcx_endpoint.endpoint.utils.config import Config - @pytest.fixture def run_interchange_process( - get_standard_funcx_client, setup_register_endpoint_response, tmp_path + get_standard_compute_client, setup_register_endpoint_response, tmp_path ): """ Start and stop a subprocess that executes the EndpointInterchange class. @@ -45,9 +44,9 @@ def run_it(reg_info: dict, endpoint_uuid, endpoint_dir): endpoint_uuid = str(uuid.uuid4()) endpoint_name = "endpoint_foo" - fxc = get_standard_funcx_client() + gcc = get_standard_compute_client() setup_register_endpoint_response(endpoint_uuid) - reg_info = fxc.register_endpoint(endpoint_name, endpoint_uuid) + reg_info = gcc.register_endpoint(endpoint_name, endpoint_uuid) assert isinstance(reg_info, dict), "Test setup verification" assert reg_info["endpoint_id"] == endpoint_uuid, "Test setup verification" assert "task_queue_info" in reg_info diff --git a/funcx_endpoint/tests/integration/funcx_endpoint/endpoint/test_messages_compat.py b/compute_endpoint/tests/integration/endpoint/endpoint/test_messages_compat.py similarity index 76% rename from funcx_endpoint/tests/integration/funcx_endpoint/endpoint/test_messages_compat.py rename to compute_endpoint/tests/integration/endpoint/endpoint/test_messages_compat.py index ae9aad6d1..fefcf5583 100644 --- a/funcx_endpoint/tests/integration/funcx_endpoint/endpoint/test_messages_compat.py +++ b/compute_endpoint/tests/integration/endpoint/endpoint/test_messages_compat.py @@ -1,24 +1,27 @@ import pickle import uuid -from funcx_common.messagepack import unpack -from funcx_common.messagepack.message_types import Container, ContainerImage -from funcx_common.messagepack.message_types import ( +from globus_compute_common.messagepack import unpack +from globus_compute_common.messagepack.message_types import Container, ContainerImage +from globus_compute_common.messagepack.message_types import ( EPStatusReport as OutgoingEPStatusReport, ) -from funcx_common.messagepack.message_types import Task as OutgoingTask -from funcx_common.messagepack.message_types import TaskTransition -from funcx_common.tasks.constants import ActorName, TaskState - -from funcx_endpoint.endpoint.messages_compat import ( +from 
globus_compute_common.messagepack.message_types import Task as OutgoingTask +from globus_compute_common.messagepack.message_types import TaskTransition +from globus_compute_common.tasks.constants import ActorName, TaskState +from globus_compute_endpoint.endpoint.messages_compat import ( convert_to_internaltask, try_convert_to_messagepack, ) -from funcx_endpoint.executors.high_throughput.messages import ( +from globus_compute_endpoint.executors.high_throughput.messages import ( EPStatusReport as InternalEPStatusReport, ) -from funcx_endpoint.executors.high_throughput.messages import Message as InternalMessage -from funcx_endpoint.executors.high_throughput.messages import Task as InternalTask +from globus_compute_endpoint.executors.high_throughput.messages import ( + Message as InternalMessage, +) +from globus_compute_endpoint.executors.high_throughput.messages import ( + Task as InternalTask, +) def test_ep_status_report_conversion(): diff --git a/funcx_endpoint/tests/integration/funcx_endpoint/executors/high_throughput/test_funcx_manager.py b/compute_endpoint/tests/integration/endpoint/executors/high_throughput/test_manager.py similarity index 81% rename from funcx_endpoint/tests/integration/funcx_endpoint/executors/high_throughput/test_funcx_manager.py rename to compute_endpoint/tests/integration/endpoint/executors/high_throughput/test_manager.py index 7e42a2062..6c2f3e961 100644 --- a/funcx_endpoint/tests/integration/funcx_endpoint/executors/high_throughput/test_funcx_manager.py +++ b/compute_endpoint/tests/integration/endpoint/executors/high_throughput/test_manager.py @@ -4,9 +4,8 @@ import shutil import pytest - -from funcx_endpoint.executors.high_throughput.funcx_manager import Manager -from funcx_endpoint.executors.high_throughput.messages import Task +from globus_compute_endpoint.executors.high_throughput.manager import Manager +from globus_compute_endpoint.executors.high_throughput.messages import Task class TestManager: @@ -19,7 +18,7 @@ def test_setup_teardown(self): def test_remove_worker_init(self, mocker): # zmq is being mocked here because it was making tests hang mocker.patch( - "funcx_endpoint.executors.high_throughput.funcx_manager.zmq.Context" + "globus_compute_endpoint.executors.high_throughput.manager.zmq.Context" # noqa: E501 ) manager = Manager(logdir="./", uid="mock_uid") @@ -35,11 +34,11 @@ def test_remove_worker_init(self, mocker): def test_poll_funcx_task_socket(self, mocker): # zmq is being mocked here because it was making tests hang mocker.patch( - "funcx_endpoint.executors.high_throughput.funcx_manager.zmq.Context" + "globus_compute_endpoint.executors.high_throughput.manager.zmq.Context" # noqa: E501 ) mock_worker_map = mocker.patch( - "funcx_endpoint.executors.high_throughput.funcx_manager.WorkerMap" + "globus_compute_endpoint.executors.high_throughput.manager.WorkerMap" ) manager = Manager(logdir="./", uid="mock_uid") diff --git a/funcx_endpoint/tests/integration/funcx_endpoint/executors/high_throughput/test_worker_map.py b/compute_endpoint/tests/integration/endpoint/executors/high_throughput/test_worker_map.py similarity index 85% rename from funcx_endpoint/tests/integration/funcx_endpoint/executors/high_throughput/test_worker_map.py rename to compute_endpoint/tests/integration/endpoint/executors/high_throughput/test_worker_map.py index 02f365e1d..11b5728f6 100644 --- a/funcx_endpoint/tests/integration/funcx_endpoint/executors/high_throughput/test_worker_map.py +++ b/compute_endpoint/tests/integration/endpoint/executors/high_throughput/test_worker_map.py @@ 
-1,13 +1,13 @@ import logging import os -from funcx_endpoint.executors.high_throughput.worker_map import WorkerMap +from globus_compute_endpoint.executors.high_throughput.worker_map import WorkerMap class TestWorkerMap: def test_add_worker(self, mocker): mock_popen = mocker.patch( - "funcx_endpoint.executors.high_throughput.worker_map.subprocess.Popen" + "globus_compute_endpoint.executors.high_throughput.worker_map.subprocess.Popen" # noqa: E501 ) mock_popen.return_value = "proc" diff --git a/funcx_endpoint/tests/integration/funcx_endpoint/executors/mock_executors.py b/compute_endpoint/tests/integration/endpoint/executors/mock_executors.py similarity index 77% rename from funcx_endpoint/tests/integration/funcx_endpoint/executors/mock_executors.py rename to compute_endpoint/tests/integration/endpoint/executors/mock_executors.py index 79a0be286..444b55cc5 100644 --- a/funcx_endpoint/tests/integration/funcx_endpoint/executors/mock_executors.py +++ b/compute_endpoint/tests/integration/endpoint/executors/mock_executors.py @@ -4,10 +4,9 @@ import unittest.mock import dill -from funcx_common.messagepack.message_types import Result, Task - -from funcx import FuncXClient -from funcx_endpoint.executors.high_throughput.messages import Message +from globus_compute_common.messagepack.message_types import Result, Task +from globus_compute_endpoint.executors.high_throughput.messages import Message +from globus_compute_sdk import Client class MockExecutor(unittest.mock.Mock): @@ -19,7 +18,7 @@ def __init__(self, *args, **kwargs): def start( self, results_passthrough: multiprocessing.Queue = None, - funcx_client: FuncXClient = None, + funcx_client: Client = None, ): self.results_passthrough = results_passthrough self.funcx_client = funcx_client diff --git a/funcx_endpoint/tests/integration/test_rabbit_mq/result_queue_subscriber.py b/compute_endpoint/tests/integration/test_rabbit_mq/result_queue_subscriber.py similarity index 99% rename from funcx_endpoint/tests/integration/test_rabbit_mq/result_queue_subscriber.py rename to compute_endpoint/tests/integration/test_rabbit_mq/result_queue_subscriber.py index c4660e4df..a2b88ba73 100644 --- a/funcx_endpoint/tests/integration/test_rabbit_mq/result_queue_subscriber.py +++ b/compute_endpoint/tests/integration/test_rabbit_mq/result_queue_subscriber.py @@ -10,8 +10,7 @@ from multiprocessing.synchronize import Event as EventType import pika - -from funcx_endpoint.endpoint.rabbit_mq.base import SubscriberProcessStatus +from globus_compute_endpoint.endpoint.rabbit_mq.base import SubscriberProcessStatus logger = logging.getLogger(__name__) diff --git a/funcx_endpoint/tests/integration/test_rabbit_mq/task_queue_publisher.py b/compute_endpoint/tests/integration/test_rabbit_mq/task_queue_publisher.py similarity index 96% rename from funcx_endpoint/tests/integration/test_rabbit_mq/task_queue_publisher.py rename to compute_endpoint/tests/integration/test_rabbit_mq/task_queue_publisher.py index a3f932550..8a17ea473 100644 --- a/funcx_endpoint/tests/integration/test_rabbit_mq/task_queue_publisher.py +++ b/compute_endpoint/tests/integration/test_rabbit_mq/task_queue_publisher.py @@ -4,8 +4,7 @@ import pika import pika.channel - -from funcx_endpoint.endpoint.rabbit_mq.base import RabbitPublisherStatus +from globus_compute_endpoint.endpoint.rabbit_mq.base import RabbitPublisherStatus logger = logging.getLogger(__name__) diff --git a/funcx_endpoint/tests/integration/test_rabbit_mq/test_rabbit_e2e.py b/compute_endpoint/tests/integration/test_rabbit_mq/test_rabbit_e2e.py similarity 
index 100% rename from funcx_endpoint/tests/integration/test_rabbit_mq/test_rabbit_e2e.py rename to compute_endpoint/tests/integration/test_rabbit_mq/test_rabbit_e2e.py diff --git a/funcx_endpoint/tests/integration/test_rabbit_mq/test_result_q.py b/compute_endpoint/tests/integration/test_rabbit_mq/test_result_q.py similarity index 100% rename from funcx_endpoint/tests/integration/test_rabbit_mq/test_result_q.py rename to compute_endpoint/tests/integration/test_rabbit_mq/test_result_q.py diff --git a/funcx_endpoint/tests/integration/test_rabbit_mq/test_result_q_heartbeat.py b/compute_endpoint/tests/integration/test_rabbit_mq/test_result_q_heartbeat.py similarity index 100% rename from funcx_endpoint/tests/integration/test_rabbit_mq/test_result_q_heartbeat.py rename to compute_endpoint/tests/integration/test_rabbit_mq/test_result_q_heartbeat.py diff --git a/funcx_endpoint/tests/integration/test_rabbit_mq/test_task_q.py b/compute_endpoint/tests/integration/test_rabbit_mq/test_task_q.py similarity index 98% rename from funcx_endpoint/tests/integration/test_rabbit_mq/test_task_q.py rename to compute_endpoint/tests/integration/test_rabbit_mq/test_task_q.py index 6cb0e490a..bd0093dc6 100644 --- a/funcx_endpoint/tests/integration/test_rabbit_mq/test_task_q.py +++ b/compute_endpoint/tests/integration/test_rabbit_mq/test_task_q.py @@ -6,8 +6,7 @@ import uuid import pytest - -from funcx_endpoint.endpoint.rabbit_mq import TaskQueueSubscriber +from globus_compute_endpoint.endpoint.rabbit_mq import TaskQueueSubscriber def test_synch(start_task_q_publisher, start_task_q_subscriber, count=10): diff --git a/funcx_endpoint/tests/unit/test_bad_endpoint_config.py b/compute_endpoint/tests/unit/test_bad_endpoint_config.py similarity index 82% rename from funcx_endpoint/tests/unit/test_bad_endpoint_config.py rename to compute_endpoint/tests/unit/test_bad_endpoint_config.py index 87a480326..01e9d4421 100644 --- a/funcx_endpoint/tests/unit/test_bad_endpoint_config.py +++ b/compute_endpoint/tests/unit/test_bad_endpoint_config.py @@ -1,6 +1,7 @@ import pytest - -from funcx_endpoint.executors.high_throughput.executor import HighThroughputExecutor +from globus_compute_endpoint.executors.high_throughput.executor import ( + HighThroughputExecutor, +) invalid_addresses = ["localhost", "login1.theta.alcf.anl.gov", "*"] diff --git a/funcx_endpoint/tests/unit/test_cli_behavior.py b/compute_endpoint/tests/unit/test_cli_behavior.py similarity index 88% rename from funcx_endpoint/tests/unit/test_cli_behavior.py rename to compute_endpoint/tests/unit/test_cli_behavior.py index 576dfb0dc..7d9d9414f 100644 --- a/funcx_endpoint/tests/unit/test_cli_behavior.py +++ b/compute_endpoint/tests/unit/test_cli_behavior.py @@ -6,8 +6,7 @@ import pytest from click.testing import CliRunner - -from funcx_endpoint.cli import app +from globus_compute_endpoint.cli import app @pytest.fixture @@ -17,9 +16,9 @@ def funcx_dir_path(tmp_path): @pytest.fixture(autouse=True) def mock_cli_state(funcx_dir_path): - with mock.patch("funcx_endpoint.cli.Endpoint") as mock_ep: + with mock.patch("globus_compute_endpoint.cli.Endpoint") as mock_ep: mock_ep.return_value = mock_ep - with mock.patch("funcx_endpoint.cli.CommandState.ensure") as m_state: + with mock.patch("globus_compute_endpoint.cli.CommandState.ensure") as m_state: mock_state = mock.Mock() mock_state.endpoint_config_dir = funcx_dir_path m_state.return_value = mock_state @@ -36,7 +35,7 @@ def func(name): ep_dir.mkdir(parents=True, exist_ok=True) ep_config = ep_dir / "config.py" ep_config.write_text( # 
minimal setup to make loading work - "from funcx_endpoint.endpoint.utils.config import Config\n" + "from globus_compute_endpoint.endpoint.utils.config import Config\n" "config = Config(multi_tenant=False)" ) @@ -100,8 +99,8 @@ def test_start_ep_reads_stdin( ): data_is_valid, reg_info = reg_data - mock_log = mocker.patch("funcx_endpoint.cli.log") - mock_sys = mocker.patch("funcx_endpoint.cli.sys") + mock_log = mocker.patch("globus_compute_endpoint.cli.log") + mock_sys = mocker.patch("globus_compute_endpoint.cli.sys") mock_sys.stdin.closed = False mock_sys.stdin.isatty.return_value = False mock_sys.stdin.read.return_value = reg_info @@ -123,7 +122,7 @@ def test_start_ep_reads_stdin( assert reg_info_found == {} -@mock.patch("funcx_endpoint.cli.read_config") +@mock.patch("globus_compute_endpoint.cli.read_config") def test_stop_endpoint(read_config, run_line, mock_cli_state, make_endpoint_dir): run_line("stop foo") mock_ep, _ = mock_cli_state @@ -147,7 +146,7 @@ def test_start_ep_incorrect(run_line, mock_cli_state, make_endpoint_dir): conf = mock_state.endpoint_config_dir / "foo" / "config.py" conf.write_text("asa asd df = 5") # fail the import - with mock.patch("funcx_endpoint.cli.log") as mock_log: + with mock.patch("globus_compute_endpoint.cli.log") as mock_log: res = run_line("start foo", assert_exit_code=1) assert "might be out of date" in mock_log.exception.call_args[0][0] assert isinstance(res.exception, SyntaxError) @@ -160,7 +159,7 @@ def test_start_ep_incorrect(run_line, mock_cli_state, make_endpoint_dir): assert "modified incorrectly?" in res.stderr -@mock.patch("funcx_endpoint.cli.read_config") +@mock.patch("globus_compute_endpoint.cli.read_config") def test_delete_endpoint(read_config, run_line, mock_cli_state): run_line("delete foo --yes") mock_ep, _ = mock_cli_state diff --git a/funcx_endpoint/tests/unit/test_command_queue_subscriber.py b/compute_endpoint/tests/unit/test_command_queue_subscriber.py similarity index 97% rename from funcx_endpoint/tests/unit/test_command_queue_subscriber.py rename to compute_endpoint/tests/unit/test_command_queue_subscriber.py index 35be6737f..a1930099c 100644 --- a/funcx_endpoint/tests/unit/test_command_queue_subscriber.py +++ b/compute_endpoint/tests/unit/test_command_queue_subscriber.py @@ -6,13 +6,12 @@ import threading from unittest import mock +import globus_compute_endpoint.endpoint.rabbit_mq.command_queue_subscriber as cqs import pytest as pytest from pika.spec import Basic, BasicProperties from tests.utils import try_assert -import funcx_endpoint.endpoint.rabbit_mq.command_queue_subscriber as cqs - -_MOCK_BASE = "funcx_endpoint.endpoint.rabbit_mq.command_queue_subscriber." +_MOCK_BASE = "globus_compute_endpoint.endpoint.rabbit_mq.command_queue_subscriber." 
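The test_cli_behavior.py changes above drive the renamed globus-compute-endpoint commands in-process through Click's CliRunner, which captures exit codes and output without spawning a subprocess. A self-contained sketch of that technique with a toy command (hypothetical names; assumes only click is installed):

    import click
    from click.testing import CliRunner

    @click.command()
    @click.argument("name")
    def stop(name):
        """Toy stand-in for an endpoint subcommand."""
        click.echo(f"Stopping endpoint: {name}")

    result = CliRunner().invoke(stop, ["foo"])
    assert result.exit_code == 0
    assert "Stopping endpoint: foo" in result.output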
class MockedCommandQueueSubscriber(cqs.CommandQueueSubscriber): diff --git a/funcx_endpoint/tests/unit/test_endpoint_unit.py b/compute_endpoint/tests/unit/test_endpoint_unit.py similarity index 88% rename from funcx_endpoint/tests/unit/test_endpoint_unit.py rename to compute_endpoint/tests/unit/test_endpoint_unit.py index ce6bb5ee4..8d6e03be7 100644 --- a/funcx_endpoint/tests/unit/test_endpoint_unit.py +++ b/compute_endpoint/tests/unit/test_endpoint_unit.py @@ -13,13 +13,12 @@ import pytest import responses +from globus_compute_endpoint.endpoint import endpoint +from globus_compute_endpoint.endpoint.default_config import config as default_config +from globus_compute_endpoint.endpoint.endpoint import Endpoint +from globus_compute_endpoint.endpoint.utils.config import Config -from funcx_endpoint.endpoint import endpoint -from funcx_endpoint.endpoint.default_config import config as default_config -from funcx_endpoint.endpoint.endpoint import Endpoint -from funcx_endpoint.endpoint.utils.config import Config - -_mock_base = "funcx_endpoint.endpoint.endpoint." +_mock_base = "globus_compute_endpoint.endpoint.endpoint." @pytest.fixture @@ -91,8 +90,6 @@ def mock_ep_data(fs): @pytest.fixture def mock_ep_buf(): buf = io.StringIO() - # Endpoint.get_endpoint - # ep = mocker.patch("funcx_endpoint.endpoint.endpoint.Endpoint.get_endpoints") Endpoint.get_endpoints = mock.Mock() Endpoint.get_endpoints.return_value = {} @@ -122,15 +119,15 @@ def test_start_endpoint( mocker, fs, randomstring, - get_standard_funcx_client, + get_standard_compute_client, register_endpoint_response, mock_ep_data, ): - mock_fxc = get_standard_funcx_client() + mock_gcc = get_standard_compute_client() mock_log = mocker.patch(f"{_mock_base}log") mock_daemon = mocker.patch(f"{_mock_base}daemon") mock_epinterchange = mocker.patch(f"{_mock_base}EndpointInterchange") - mocker.patch(f"{_mock_base}Endpoint.get_funcx_client").return_value = mock_fxc + mocker.patch(f"{_mock_base}Endpoint.get_funcx_client").return_value = mock_gcc ep, ep_dir, log_to_console, no_color, ep_conf = mock_ep_data ep_id = str(uuid.uuid4()) @@ -163,12 +160,12 @@ def test_register_endpoint_invalid_response( endpoint_uuid, other_endpoint_id, register_endpoint_response, - get_standard_funcx_client, + get_standard_compute_client, mock_ep_data, ): - mock_fxc = get_standard_funcx_client() + mock_gcc = get_standard_compute_client() mock_log = mocker.patch(f"{_mock_base}log") - mocker.patch(f"{_mock_base}Endpoint.get_funcx_client").return_value = mock_fxc + mocker.patch(f"{_mock_base}Endpoint.get_funcx_client").return_value = mock_gcc ep, ep_dir, log_to_console, no_color, ep_conf = mock_ep_data @@ -190,14 +187,14 @@ def test_register_endpoint_locked_error( mocker, fs, register_endpoint_failure_response, - get_standard_funcx_client, + get_standard_compute_client, mock_ep_data, ): """ Check to ensure endpoint registration escalates up with API error """ - mock_fxc = get_standard_funcx_client() - mocker.patch(f"{_mock_base}Endpoint.get_funcx_client").return_value = mock_fxc + mock_gcc = get_standard_compute_client() + mocker.patch(f"{_mock_base}Endpoint.get_funcx_client").return_value = mock_gcc ep, ep_dir, log_to_console, no_color, ep_conf = mock_ep_data ep_id = str(uuid.uuid4()) @@ -214,15 +211,15 @@ def test_register_endpoint_is_not_multitenant( fs, endpoint_uuid, register_endpoint_response, - get_standard_funcx_client, + get_standard_compute_client, randomstring, multi_tenant, mock_ep_data, ): - mock_fxc = get_standard_funcx_client() + mock_gcc = 
get_standard_compute_client() mock_daemon = mocker.patch(f"{_mock_base}daemon") mock_epinterchange = mocker.patch(f"{_mock_base}EndpointInterchange") - mocker.patch(f"{_mock_base}Endpoint.get_funcx_client").return_value = mock_fxc + mocker.patch(f"{_mock_base}Endpoint.get_funcx_client").return_value = mock_gcc ep, ep_dir, log_to_console, no_color, ep_conf = mock_ep_data ep_id = str(uuid.uuid4()) @@ -248,7 +245,7 @@ def test_list_endpoints_none_configured(mock_ep_buf): Endpoint.print_endpoint_table() assert "No endpoints configured" in buf.getvalue() assert "Hint:" in buf.getvalue() - assert "funcx-endpoint configure" in buf.getvalue() + assert "globus-compute-endpoint configure" in buf.getvalue() def test_list_endpoints_no_id_yet(mock_ep_buf, randomstring): @@ -268,7 +265,7 @@ def test_list_endpoints_long_names_wrapped( ): buf = mock_ep_buf tsize = namedtuple("terminal_size", ["columns", "lines"])(*term_size) - mock_shutil = mocker.patch("funcx_endpoint.endpoint.endpoint.shutil") + mock_shutil = mocker.patch("globus_compute_endpoint.endpoint.endpoint.shutil") mock_shutil.get_terminal_size.return_value = tsize def rand_length_str(min_=2, max_=30): @@ -319,13 +316,14 @@ def test_endpoint_get_metadata(mocker): } mocker.patch( - "funcx_endpoint.endpoint.endpoint.__version__", mock_data["endpoint_version"] + "globus_compute_endpoint.endpoint.endpoint.__version__", + mock_data["endpoint_version"], ) - mock_fqdn = mocker.patch("funcx_endpoint.endpoint.endpoint.socket.getfqdn") + mock_fqdn = mocker.patch("globus_compute_endpoint.endpoint.endpoint.socket.getfqdn") mock_fqdn.return_value = mock_data["hostname"] - mock_pwuid = mocker.patch("funcx_endpoint.endpoint.endpoint.pwd.getpwuid") + mock_pwuid = mocker.patch("globus_compute_endpoint.endpoint.endpoint.pwd.getpwuid") mock_pwuid.return_value = SimpleNamespace(pw_name=mock_data["local_user"]) meta = Endpoint.get_metadata(default_config) @@ -351,9 +349,9 @@ def test_endpoint_sets_process_title(mocker, fs, randomstring, mock_ep_data, env orig_proc_title = randomstring() - mock_fxc = mocker.Mock() - mock_fxc.register_endpoint.return_value = {"endpoint_id": ep_id} - mocker.patch(f"{_mock_base}Endpoint.get_funcx_client", return_value=mock_fxc) + mock_gcc = mocker.Mock() + mock_gcc.register_endpoint.return_value = {"endpoint_id": ep_id} + mocker.patch(f"{_mock_base}Endpoint.get_funcx_client", return_value=mock_gcc) mock_spt = mocker.patch(f"{_mock_base}setproctitle") mock_spt.getproctitle.return_value = orig_proc_title @@ -363,7 +361,9 @@ def test_endpoint_sets_process_title(mocker, fs, randomstring, mock_ep_data, env ep.start_endpoint(ep_dir, ep_id, ep_conf, log_to_console, no_color, reg_info={}) a, _k = mock_spt.setproctitle.call_args - assert a[0].startswith("funcX Endpoint"), "Expect easily identifiable process name" + assert a[0].startswith( + "Globus Compute Endpoint" + ), "Expect easily identifiable process name" assert f"{ep_id}, {ep_dir.name}" in a[0], "Expect easily match process to ep conf" if not env: assert " - " not in a[0], "Default is not 'do not show env' for prod" @@ -372,14 +372,14 @@ def test_endpoint_sets_process_title(mocker, fs, randomstring, mock_ep_data, env assert a[0].endswith(f"[{orig_proc_title}]"), "Save original cmdline for debugging" -def test_endpoint_needs_no_fxclient_if_reg_info(mocker, fs, randomstring, mock_ep_data): +def test_endpoint_needs_no_client_if_reg_info(mocker, fs, randomstring, mock_ep_data): ep, ep_dir, log_to_console, no_color, ep_conf = mock_ep_data ep_id = str(uuid.uuid4()) - mock_fxc = mocker.Mock() - 
mock_fxc.register_endpoint.return_value = {"endpoint_id": ep_id} - mock_get_funcx_client = mocker.patch( - f"{_mock_base}Endpoint.get_funcx_client", return_value=mock_fxc + mock_gcc = mocker.Mock() + mock_gcc.register_endpoint.return_value = {"endpoint_id": ep_id} + mock_get_compute_client = mocker.patch( + f"{_mock_base}Endpoint.get_funcx_client", return_value=mock_gcc ) mock_daemon = mocker.patch(f"{_mock_base}daemon") mock_epinterchange = mocker.patch(f"{_mock_base}EndpointInterchange") @@ -389,13 +389,13 @@ def test_endpoint_needs_no_fxclient_if_reg_info(mocker, fs, randomstring, mock_e assert mock_epinterchange.called, "Has registration, should start." assert mock_daemon.DaemonContext.called - assert not mock_get_funcx_client.called, "No need for FXClient!" + assert not mock_get_compute_client.called, "No need for a Compute client!" reg_info.clear() ep.start_endpoint(ep_dir, ep_id, ep_conf, log_to_console, no_color, reg_info) assert mock_epinterchange.called, "Has registration, should start." assert mock_daemon.DaemonContext.called - assert mock_get_funcx_client.called, "Need registration info, need FXClient" + assert mock_get_compute_client.called, "Need registration info, need a Compute client" def test_endpoint_sets_owner_only_access(tmp_path, umask): diff --git a/funcx_endpoint/tests/unit/test_endpointinterchange.py b/compute_endpoint/tests/unit/test_endpointinterchange.py similarity index 92% rename from funcx_endpoint/tests/unit/test_endpointinterchange.py rename to compute_endpoint/tests/unit/test_endpointinterchange.py index 243f22cc7..3ed71d073 100644 --- a/funcx_endpoint/tests/unit/test_endpointinterchange.py +++ b/compute_endpoint/tests/unit/test_endpointinterchange.py @@ -2,11 +2,10 @@ from unittest.mock import MagicMock import pytest +from globus_compute_endpoint.endpoint.interchange import EndpointInterchange +from globus_compute_endpoint.endpoint.utils.config import Config -from funcx_endpoint.endpoint.interchange import EndpointInterchange -from funcx_endpoint.endpoint.utils.config import Config - -_mock_base = "funcx_endpoint.endpoint.interchange." +_mock_base = "globus_compute_endpoint.endpoint.interchange." def test_main_exception_always_quiesces(mocker, fs): diff --git a/funcx_endpoint/tests/unit/test_endpointmanager_unit.py b/compute_endpoint/tests/unit/test_endpointmanager_unit.py similarity index 95% rename from funcx_endpoint/tests/unit/test_endpointmanager_unit.py rename to compute_endpoint/tests/unit/test_endpointmanager_unit.py index f50d0879f..dcd05da0b 100644 --- a/funcx_endpoint/tests/unit/test_endpointmanager_unit.py +++ b/compute_endpoint/tests/unit/test_endpointmanager_unit.py @@ -14,13 +14,12 @@ import pika import pytest as pytest import responses +from globus_compute_endpoint.endpoint.endpoint_manager import EndpointManager +from globus_compute_endpoint.endpoint.utils import _redact_url_creds +from globus_compute_endpoint.endpoint.utils.config import Config from globus_sdk import GlobusAPIError, NetworkError -from funcx_endpoint.endpoint.endpoint_manager import EndpointManager -from funcx_endpoint.endpoint.utils import _redact_url_creds -from funcx_endpoint.endpoint.utils.config import Config - -_MOCK_BASE = "funcx_endpoint.endpoint.endpoint_manager." +_MOCK_BASE = "globus_compute_endpoint.endpoint.endpoint_manager."
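The two assertions above capture the pattern these endpoint tests use throughout: patch the client factory, then verify it is consulted only when registration info is missing. Below is a minimal, self-contained sketch of that pattern using plain `unittest.mock`; `start` and `get_client` are illustrative stand-ins, not names from this repo.

```python
import uuid
from unittest import mock


def start(reg_info: dict, get_client) -> str:
    # Mirror the behavior under test: only build a client (an expensive,
    # authenticated object) when registration info was not supplied.
    if not reg_info:
        reg_info = get_client().register_endpoint()
    return reg_info["endpoint_id"]


def test_no_client_needed_if_reg_info():
    mock_gcc = mock.Mock()
    mock_gcc.register_endpoint.return_value = {"endpoint_id": str(uuid.uuid4())}
    get_client = mock.Mock(return_value=mock_gcc)

    assert start({"endpoint_id": "abc"}, get_client) == "abc"
    assert not get_client.called, "Registration supplied; no client needed"

    start({}, get_client)
    assert get_client.called, "No registration info; client required"
```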
@pytest.fixture @@ -46,18 +45,18 @@ def mock_setproctitle(mocker, randomstring): @pytest.fixture def mock_client(mocker): ep_uuid = str(uuid.uuid1()) - mock_fxc = mocker.Mock() - mock_fxc.register_endpoint.return_value = { + mock_gcc = mocker.Mock() + mock_gcc.register_endpoint.return_value = { "endpoint_id": ep_uuid, "command_queue_info": {"connection_url": "", "queue": ""}, } - mocker.patch(f"{_MOCK_BASE}funcx.FuncXClient", return_value=mock_fxc) - yield ep_uuid, mock_fxc + mocker.patch("globus_compute_sdk.Client", return_value=mock_gcc) + yield ep_uuid, mock_gcc @pytest.fixture def epmanager(mocker, conf_dir, mock_conf, mock_client): - ep_uuid, mock_fxc = mock_client + ep_uuid, mock_gcc = mock_client em = EndpointManager(conf_dir, ep_uuid, mock_conf) em._command = mocker.Mock() @@ -119,14 +118,16 @@ def test_sets_process_title( ): mock_spt, orig_proc_title = mock_setproctitle - ep_uuid, mock_fxc = mock_client + ep_uuid, mock_gcc = mock_client mock_conf.environment = env EndpointManager(conf_dir, ep_uuid, mock_conf) assert mock_spt.setproctitle.called, "Sanity check" a, *_ = mock_spt.setproctitle.call_args - assert a[0].startswith("funcX Endpoint"), "Expect easily identifiable process name" + assert a[0].startswith( + "Globus Compute Endpoint" + ), "Expect easily identifiable process name" assert "*(" in a[0], "Expected asterisk as subtle clue of 'multi-tenant'" assert f"{ep_uuid}, {conf_dir.name}" in a[0], "Can find process by conf" @@ -146,12 +147,12 @@ def test_gracefully_exits_if_in_conflict_or_locked( mock_conf, endpoint_uuid, randomstring, - get_standard_funcx_client, + get_standard_compute_client, status_code, ): mock_log = mocker.patch(f"{_MOCK_BASE}log") - mock_fxc = get_standard_funcx_client() - mocker.patch(f"{_MOCK_BASE}funcx.FuncXClient", return_value=mock_fxc) + mock_gcc = get_standard_compute_client() + mocker.patch("globus_compute_sdk.Client", return_value=mock_gcc) some_err = randomstring() register_endpoint_failure_response(status_code, some_err) @@ -171,11 +172,11 @@ def test_gracefully_exits_if_in_conflict_or_locked( def test_sends_metadata_during_registration(conf_dir, mock_conf, mock_client): - ep_uuid, mock_fxc = mock_client + ep_uuid, mock_gcc = mock_client EndpointManager(conf_dir, ep_uuid, mock_conf) - assert mock_fxc.register_endpoint.called - _a, k = mock_fxc.register_endpoint.call_args + assert mock_gcc.register_endpoint.called + _a, k = mock_gcc.register_endpoint.call_args for key in ("endpoint_version", "hostname", "local_user", "config"): assert key in k["metadata"], "Expected minimal metadata" @@ -202,7 +203,7 @@ def test_handles_network_error_scriptably( mock_log = mocker.patch(f"{_MOCK_BASE}log") some_err = randomstring() mocker.patch( - f"{_MOCK_BASE}funcx.FuncXClient", + "globus_compute_sdk.Client", side_effect=NetworkError(some_err, Exception()), ) @@ -221,7 +222,7 @@ def test_mismatched_id_gracefully_exits( mocker, randomstring, conf_dir, mock_conf, mock_client ): mock_log = mocker.patch(f"{_MOCK_BASE}log") - wrong_uuid, mock_fxc = mock_client + wrong_uuid, mock_gcc = mock_client ep_uuid = str(uuid.uuid4()) assert wrong_uuid != ep_uuid, "Verify test setup" @@ -249,9 +250,9 @@ def test_handles_invalid_reg_info( mocker, randomstring, conf_dir, mock_conf, mock_client, received_data ): mock_log = mocker.patch(f"{_MOCK_BASE}log") - ep_uuid, mock_fxc = mock_client + ep_uuid, mock_gcc = mock_client received_data[1]["endpoint_id"] = ep_uuid - should_succeed, mock_fxc.register_endpoint.return_value = received_data + should_succeed, 
mock_gcc.register_endpoint.return_value = received_data if not should_succeed: with pytest.raises(SystemExit) as pyexc: @@ -267,9 +268,9 @@ def test_handles_invalid_reg_info( def test_writes_endpoint_uuid(epmanager): conf_dir, _mock_conf, mock_client, _em = epmanager - _ep_uuid, mock_fxc = mock_client + _ep_uuid, mock_gcc = mock_client - returned_uuid = mock_fxc.register_endpoint.return_value["endpoint_id"] + returned_uuid = mock_gcc.register_endpoint.return_value["endpoint_id"] ep_json_path = conf_dir / "endpoint.json" assert ep_json_path.exists() @@ -705,7 +706,7 @@ def test_start_endpoint_children_die_with_parent(successful_exec): assert pyexc.value.code == 85, "Q&D: verify we exec'ed, based on '+= 1'" a, k = mock_os.execvpe.call_args - assert a[0] == "funcx-endpoint", "Sanity check" + assert a[0] == "globus-compute-endpoint", "Sanity check" assert k["args"][0] == a[0], "Expect transparency for admin" assert k["args"][-1] == "--die-with-parent" # trust flag to do the hard work diff --git a/funcx_endpoint/tests/unit/test_highthroughputinterchange.py b/compute_endpoint/tests/unit/test_highthroughputinterchange.py similarity index 87% rename from funcx_endpoint/tests/unit/test_highthroughputinterchange.py rename to compute_endpoint/tests/unit/test_highthroughputinterchange.py index 1bee74c05..5a1363faa 100644 --- a/funcx_endpoint/tests/unit/test_highthroughputinterchange.py +++ b/compute_endpoint/tests/unit/test_highthroughputinterchange.py @@ -3,13 +3,15 @@ from unittest import mock import pytest -from funcx_common.tasks import TaskState - -from funcx_endpoint.executors.high_throughput.interchange import Interchange, starter -from funcx_endpoint.executors.high_throughput.messages import Task +from globus_compute_common.tasks import TaskState +from globus_compute_endpoint.executors.high_throughput.interchange import ( + Interchange, + starter, +) +from globus_compute_endpoint.executors.high_throughput.messages import Task # Work with linter's 88 char limit, and be uniform in this file how we do it -mod_dot_path = "funcx_endpoint.executors.high_throughput.interchange" +mod_dot_path = "globus_compute_endpoint.executors.high_throughput.interchange" @mock.patch(f"{mod_dot_path}.zmq") diff --git a/funcx_endpoint/tests/unit/test_funcx_manager_unit.py b/compute_endpoint/tests/unit/test_manager_unit.py similarity index 64% rename from funcx_endpoint/tests/unit/test_funcx_manager_unit.py rename to compute_endpoint/tests/unit/test_manager_unit.py index 8f5124568..abbdbc735 100644 --- a/funcx_endpoint/tests/unit/test_funcx_manager_unit.py +++ b/compute_endpoint/tests/unit/test_manager_unit.py @@ -2,20 +2,19 @@ import uuid from unittest import mock -from funcx_common.tasks import TaskState +from globus_compute_common.tasks import TaskState +from globus_compute_endpoint.executors.high_throughput.manager import Manager +from globus_compute_endpoint.executors.high_throughput.messages import Task -from funcx_endpoint.executors.high_throughput.funcx_manager import Manager as FXManager -from funcx_endpoint.executors.high_throughput.messages import Task - -@mock.patch("funcx_endpoint.executors.high_throughput.funcx_manager.zmq") -class TestFuncxManager: +@mock.patch("globus_compute_endpoint.executors.high_throughput.manager.zmq") +class TestManager: def test_task_to_worker_status_change(self, randomstring): task_type = randomstring() task_id = str(uuid.uuid4()) task = Task(task_id, "RAW", b"") - mgr = FXManager(uid="some_uid", worker_type=task_type) + mgr = Manager(uid="some_uid", worker_type=task_type) 
mgr.worker_map = mock.Mock() mgr.worker_map.get_worker.return_value = "some_work_id" mgr.task_queues[task_type].put(task) diff --git a/funcx_endpoint/tests/unit/test_result_queue_publisher.py b/compute_endpoint/tests/unit/test_result_queue_publisher.py similarity index 94% rename from funcx_endpoint/tests/unit/test_result_queue_publisher.py rename to compute_endpoint/tests/unit/test_result_queue_publisher.py index f8fc56daa..bc2dc6fc8 100644 --- a/funcx_endpoint/tests/unit/test_result_queue_publisher.py +++ b/compute_endpoint/tests/unit/test_result_queue_publisher.py @@ -3,8 +3,7 @@ from unittest.mock import MagicMock import pika - -from funcx_endpoint.endpoint.rabbit_mq import ( +from globus_compute_endpoint.endpoint.rabbit_mq import ( RabbitPublisherStatus, ResultQueuePublisher, ) @@ -14,7 +13,7 @@ def test_rqp_verifies_provided_queue_info(mocker, randomstring): mock_conn = MagicMock() mock_channel = MagicMock() mock_pika = mocker.patch( - "funcx_endpoint.endpoint.rabbit_mq.result_queue_publisher.pika" + "globus_compute_endpoint.endpoint.rabbit_mq.result_queue_publisher.pika" ) mock_pika.BlockingConnection.return_value = mock_conn mock_conn.channel.return_value = mock_channel diff --git a/funcx_endpoint/tests/unit/test_result_store.py b/compute_endpoint/tests/unit/test_result_store.py similarity index 97% rename from funcx_endpoint/tests/unit/test_result_store.py rename to compute_endpoint/tests/unit/test_result_store.py index 544917b11..65e8b00c1 100644 --- a/funcx_endpoint/tests/unit/test_result_store.py +++ b/compute_endpoint/tests/unit/test_result_store.py @@ -1,8 +1,7 @@ import random import pytest - -from funcx_endpoint.endpoint.result_store import ResultStore +from globus_compute_endpoint.endpoint.result_store import ResultStore @pytest.fixture() diff --git a/funcx_endpoint/tests/unit/test_utils.py b/compute_endpoint/tests/unit/test_utils.py similarity index 91% rename from funcx_endpoint/tests/unit/test_utils.py rename to compute_endpoint/tests/unit/test_utils.py index eea6dfd5b..3d1f6c4ea 100644 --- a/funcx_endpoint/tests/unit/test_utils.py +++ b/compute_endpoint/tests/unit/test_utils.py @@ -1,4 +1,4 @@ -from funcx_endpoint.endpoint.utils import _redact_url_creds +from globus_compute_endpoint.endpoint.utils import _redact_url_creds def test_url_redaction(randomstring): diff --git a/funcx_endpoint/tests/unit/test_funcx_worker.py b/compute_endpoint/tests/unit/test_worker.py similarity index 93% rename from funcx_endpoint/tests/unit/test_funcx_worker.py rename to compute_endpoint/tests/unit/test_worker.py index 1ddc0da4d..b8784e5ab 100644 --- a/funcx_endpoint/tests/unit/test_funcx_worker.py +++ b/compute_endpoint/tests/unit/test_worker.py @@ -3,10 +3,9 @@ from unittest import mock import pytest -from funcx_common import messagepack - -from funcx_endpoint.executors.high_throughput.funcx_worker import FuncXWorker -from funcx_endpoint.executors.high_throughput.messages import Task +from globus_compute_common import messagepack +from globus_compute_endpoint.executors.high_throughput.messages import Task +from globus_compute_endpoint.executors.high_throughput.worker import Worker def hello_world(): @@ -30,12 +29,12 @@ def ez_pack_function(serializer, func, args, kwargs): @pytest.fixture def test_worker(): with mock.patch( - "funcx_endpoint.executors.high_throughput.funcx_worker.zmq.Context" + "globus_compute_endpoint.executors.high_throughput.worker.zmq.Context" ) as mock_context: # the worker will receive tasks and send messages on this mock socket mock_socket = mock.Mock() 
mock_context.return_value.socket.return_value = mock_socket - yield FuncXWorker("0", "127.0.0.1", 50001) + yield Worker("0", "127.0.0.1", 50001) def test_register_and_kill(test_worker): diff --git a/funcx_endpoint/tests/utils.py b/compute_endpoint/tests/utils.py similarity index 100% rename from funcx_endpoint/tests/utils.py rename to compute_endpoint/tests/utils.py diff --git a/funcx_endpoint/tox.ini b/compute_endpoint/tox.ini similarity index 84% rename from funcx_endpoint/tox.ini rename to compute_endpoint/tox.ini index e9f1aa869..b9cadd220 100644 --- a/funcx_endpoint/tox.ini +++ b/compute_endpoint/tox.ini @@ -5,7 +5,7 @@ skip_missing_interpreters = true [testenv] passenv = RABBITMQ_INTEGRATION_TEST_URI -deps = ../funcx_sdk/ +deps = ../compute_sdk/ extras = test usedevelop = true commands = @@ -17,8 +17,8 @@ commands = deps = mypy==0.950 types-requests - ../funcx_sdk/ -commands = mypy funcx_endpoint/ {posargs} + ../compute_sdk/ +commands = mypy globus_compute_endpoint/ {posargs} [testenv:publish-release] skip_install = true diff --git a/compute_funcx/README.md b/compute_funcx/README.md new file mode 100644 index 000000000..9b3bfa825 --- /dev/null +++ b/compute_funcx/README.md @@ -0,0 +1,6 @@ +# FuncX Wrapper Package + +* This package serves as a backwards-compatible library for current funcX SDK users who may upgrade to the latest SDK but +do not want to change existing scripts that reference `funcx.*` classes to use `globus_compute_sdk.*` instead. + +* Accomplished via a variety of aliased imports (`import globus_compute_sdk.compute_class_X as FuncX_class_X`) in `__init__.py` and other files diff --git a/compute_funcx/RELEASING.md b/compute_funcx/RELEASING.md new file mode 100644 index 000000000..804b8d4ac --- /dev/null +++ b/compute_funcx/RELEASING.md @@ -0,0 +1,57 @@ +# Releasing + +The release of these wrapper packages for globus-compute-sdk and globus-compute-endpoint is intended +to support backwards compatibility for users who do not want to adopt the new globus-compute* packages. + +This release process is partially automated with tools to help along the way. + +## Prerequisites + +You must have the following tools installed and available: + +- `git` +- `scriv` +- `tox` + +You will also need the following credentials: + +- a configured GPG key in `git` in order to create signed tags +- pypi credentials for use with `twine` (e.g. a token in `~/.pypirc`) valid for + publishing `funcx` and `funcx-endpoint` + +## Procedure + +1. Bump the versions of both packages to the next release number by removing + the alpha `a0` suffix, if any, and using the next higher number. + +```bash +$EDITOR sdk/funcx/version.py endpoint/funcx_endpoint/version.py +``` + +2. Update the changelog by running `scriv collect --edit` + +3. Add and commit the changes to version numbers and changelog files (including + the removal of `changelog.d/` files), e.g. as follows + +```bash +git add changelog.d/ docs/changelog.rst +git add sdk/funcx/version.py endpoint/funcx_endpoint/version.py +git commit -m 'Bump versions and changelog for release' +git push +``` + +4. Run the release script `./release.sh` from the repo root. This will use + `tox` and your pypi credentials and will create a signed release tag. At the + end of this step, new packages will be published to pypi. + +5. Push the release tag, e.g. `git push upstream 2.0.2` + +6. Update the version numbers to the next point version and re-add the `a0` suffix, + if necessary, then commit and push, e.g.
+ +```bash +$EDITOR sdk/funcx/version.py endpoint/funcx_endpoint/version.py +git add sdk/funcx/version.py endpoint/funcx_endpoint/version.py +git commit -m 'Bump versions for release' +git push +``` diff --git a/funcx_sdk/LICENSE b/compute_funcx/endpoint/LICENSE similarity index 100% rename from funcx_sdk/LICENSE rename to compute_funcx/endpoint/LICENSE diff --git a/compute_funcx/endpoint/README.md b/compute_funcx/endpoint/README.md new file mode 100644 index 000000000..0c0fefb2c --- /dev/null +++ b/compute_funcx/endpoint/README.md @@ -0,0 +1,8 @@ +# funcx_endpoint Wrapper Package + +* This package serves as a backwards-compatible library for current +`funcx-endpoint` users who may upgrade to the latest code but +do not want to change existing scripts that reference +`funcx_endpoint.*` classes to use `globus_compute_endpoint.*` instead. + +* Accomplished via a variety of aliased imports (`import globus_compute_endpoint.compute_class_X as class_X`) in `__init__.py` and other files diff --git a/funcx_endpoint/funcx_endpoint/__init__.py b/compute_funcx/endpoint/funcx_endpoint/__init__.py similarity index 68% rename from funcx_endpoint/funcx_endpoint/__init__.py rename to compute_funcx/endpoint/funcx_endpoint/__init__.py index a7a096711..2c66b50fb 100644 --- a/funcx_endpoint/funcx_endpoint/__init__.py +++ b/compute_funcx/endpoint/funcx_endpoint/__init__.py @@ -1,4 +1,4 @@ from funcx_endpoint.version import __version__ as _version -__author__ = "The funcX team" +__author__ = "The Globus Compute Team" __version__ = _version diff --git a/compute_funcx/endpoint/funcx_endpoint/endpoint/utils/config.py b/compute_funcx/endpoint/funcx_endpoint/endpoint/utils/config.py new file mode 100644 index 000000000..f12f26008 --- /dev/null +++ b/compute_funcx/endpoint/funcx_endpoint/endpoint/utils/config.py @@ -0,0 +1 @@ +from globus_compute_endpoint.endpoint.utils.config import Config as Config # noqa: F401 diff --git a/compute_funcx/endpoint/funcx_endpoint/executors/__init__.py b/compute_funcx/endpoint/funcx_endpoint/executors/__init__.py new file mode 100644 index 000000000..f11aeb2e8 --- /dev/null +++ b/compute_funcx/endpoint/funcx_endpoint/executors/__init__.py @@ -0,0 +1,3 @@ +from globus_compute_endpoint.executors import HighThroughputExecutor + +__all__ = ["HighThroughputExecutor"] diff --git a/compute_funcx/endpoint/funcx_endpoint/executors/high_throughput/funcx_manager.py b/compute_funcx/endpoint/funcx_endpoint/executors/high_throughput/funcx_manager.py new file mode 100644 index 000000000..e211e6c51 --- /dev/null +++ b/compute_funcx/endpoint/funcx_endpoint/executors/high_throughput/funcx_manager.py @@ -0,0 +1,3 @@ +from globus_compute_endpoint.executors.high_throughput.manager import ( # noqa: F401 + Manager as FuncXManager, +) diff --git a/compute_funcx/endpoint/funcx_endpoint/executors/high_throughput/funcx_worker.py b/compute_funcx/endpoint/funcx_endpoint/executors/high_throughput/funcx_worker.py new file mode 100644 index 000000000..0daeb2a06 --- /dev/null +++ b/compute_funcx/endpoint/funcx_endpoint/executors/high_throughput/funcx_worker.py @@ -0,0 +1,3 @@ +from globus_compute_endpoint.executors.high_throughput.worker import ( # noqa: F401 + Worker as FuncXWorker, +) diff --git a/funcx_endpoint/funcx_endpoint/version.py b/compute_funcx/endpoint/funcx_endpoint/version.py similarity index 60% rename from funcx_endpoint/funcx_endpoint/version.py rename to compute_funcx/endpoint/funcx_endpoint/version.py index 3199724f6..2a3fcc7cc 100644 --- a/funcx_endpoint/funcx_endpoint/version.py +++ 
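The shim modules added above are each a single aliased re-export. As a hedged illustration of why that is enough for backwards compatibility, here is a one-file sketch using stand-in names (`Worker` here is a local class, not the real package):

```python
# Stand-in for the renamed implementation module
# (globus_compute_endpoint.executors.high_throughput.worker in the diff above):
class Worker:
    """All real logic lives under the new name."""


# Stand-in for the one-line shim (funcx_worker.py above); the real file
# spells this as: from ...worker import Worker as FuncXWorker
FuncXWorker = Worker

# Legacy user code keeps working without edits:
w = FuncXWorker()
assert isinstance(w, Worker)  # same class, reachable via two import paths
```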
b/compute_funcx/endpoint/funcx_endpoint/version.py @@ -1,9 +1,7 @@ # single source of truth for package version, # see https://packaging.python.org/en/latest/single_source_version/ -__version__ = "1.0.13" +__version__ = "2.0.0a4" -# TODO: remove after a `funcx` release -# this variable is needed because it's imported by `funcx` to do the version check VERSION = __version__ # app name to send as part of requests diff --git a/compute_funcx/endpoint/setup.cfg b/compute_funcx/endpoint/setup.cfg new file mode 100644 index 000000000..72b0c5590 --- /dev/null +++ b/compute_funcx/endpoint/setup.cfg @@ -0,0 +1,15 @@ +[isort] +profile = black +known_first_party = globus-compute-sdk, globus-compute-endpoint, funcx, funcx-endpoint + +[flake8] +# config to be black-compatible +max-line-length = 88 +ignore = E203,W503,W504 + +[mypy] +# strict = true +ignore_missing_imports = true +warn_unreachable = true +warn_no_return = true +exclude = tests diff --git a/compute_funcx/endpoint/setup.py b/compute_funcx/endpoint/setup.py new file mode 100644 index 000000000..4d3da281f --- /dev/null +++ b/compute_funcx/endpoint/setup.py @@ -0,0 +1,48 @@ +import os + +from setuptools import find_packages, setup + +REQUIRES = [ + "globus-compute-endpoint>=2.0.0", +] + +version_ns = {} +with open(os.path.join("funcx_endpoint", "version.py")) as f: + exec(f.read(), version_ns) +version = version_ns["__version__"] + +setup( + name="funcx-endpoint", + version=version, + packages=find_packages(), + description="funcX: High Performance Function Serving for Science", + install_requires=REQUIRES, + extras_require={}, + python_requires=">=3.7", + classifiers=[ + "Development Status :: 3 - Alpha", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Natural Language :: English", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Topic :: Scientific/Engineering", + ], + keywords=["funcX", "FaaS", "Function Serving"], + entry_points={ + "console_scripts": [ + "funcx-endpoint=globus_compute_endpoint.cli:cli_run_funcx", + "funcx-interchange" + "=globus_compute_endpoint.executors.high_throughput.interchange:cli_run", + "funcx-manager" + "=globus_compute_endpoint.executors.high_throughput.manager:cli_run", + "funcx-worker" + "=globus_compute_endpoint.executors.high_throughput.worker:cli_run", + ] + }, + include_package_data=True, + author="Globus Compute Team", + author_email="support@globus.org", + license="Apache License, Version 2.0", + url="https://github.com/funcx-faas/funcx", +) diff --git a/compute_funcx/endpoint/tox.ini b/compute_funcx/endpoint/tox.ini new file mode 100644 index 000000000..811d3aeb3 --- /dev/null +++ b/compute_funcx/endpoint/tox.ini @@ -0,0 +1,9 @@ +[testenv:publish-release] +skip_install = true +deps = build + twine +# clean the build dir before rebuilding +whitelist_externals = rm +commands_pre = rm -rf dist/ +commands = python -m build + twine upload dist/* diff --git a/compute_funcx/release.sh b/compute_funcx/release.sh new file mode 100755 index 000000000..06d957ea5 --- /dev/null +++ b/compute_funcx/release.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# +# Script to release a new version of the SDK and the Endpoint +# +# It does this by creating a tag named after the version, then running the +# tox release command for each package +# +# Requirements: +# the version is set in sdk +# the version is set in endpoint and matches compute_funcx/sdk +# you must have valid git config to create a signed tag (GPG key) +# you must have pypi 
credentials available to twine (e.g. ~/.pypirc) + +set -euo pipefail + +VERSION="$(grep '^__version__' sdk/funcx/version.py | cut -d '"' -f 2)" +ENDPOINT_VERSION="$(grep '^__version__' endpoint/funcx_endpoint/version.py | cut -d '"' -f 2)" + +if [[ "$VERSION" != "$ENDPOINT_VERSION" ]]; then + echo "package versions mismatched: sdk=$VERSION endpoint=$ENDPOINT_VERSION" + exit 1 +fi + +if ! grep '^funcx \& funcx\-endpoint v'"$VERSION"'$' docs/changelog.rst; then + echo "package version v$VERSION not noted in docs/changelog.rst" + exit 1 +fi + +echo "releasing v$VERSION" +git tag -s "$VERSION" -m "v$VERSION" + +pushd sdk +tox -e publish-release +popd + +cd endpoint +tox -e publish-release diff --git a/compute_funcx/sdk/LICENSE b/compute_funcx/sdk/LICENSE new file mode 100644 index 000000000..b975e431c --- /dev/null +++ b/compute_funcx/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2019] [The University of Chicago] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/compute_funcx/sdk/funcx/__init__.py b/compute_funcx/sdk/funcx/__init__.py new file mode 100644 index 000000000..c167868a6 --- /dev/null +++ b/compute_funcx/sdk/funcx/__init__.py @@ -0,0 +1,17 @@ +""" Globus Compute, formerly funcX: Fast function serving for clouds, + clusters and supercomputers. +""" +import warnings + +from funcx.version import DEPRECATION_FUNCX +from funcx.version import __version__ as _version + +__author__ = "The Globus Compute team" +__version__ = _version + +from globus_compute_sdk import Client as FuncXClient +from globus_compute_sdk import Executor as FuncXExecutor + +warnings.warn(DEPRECATION_FUNCX) + +__all__ = ("FuncXExecutor", "FuncXClient") diff --git a/compute_funcx/sdk/funcx/sdk/login_manager/__init__.py b/compute_funcx/sdk/funcx/sdk/login_manager/__init__.py new file mode 100644 index 000000000..5c5316c97 --- /dev/null +++ b/compute_funcx/sdk/funcx/sdk/login_manager/__init__.py @@ -0,0 +1,13 @@ +from globus_compute_sdk.sdk.login_manager import ComputeScopes as FuncxScopes +from globus_compute_sdk.sdk.login_manager import LoginManager as LoginManager +from globus_compute_sdk.sdk.login_manager import ( + LoginManagerProtocol as LoginManagerProtocol, +) +from globus_compute_sdk.sdk.login_manager import requires_login as requires_login + +__all__ = ( + "LoginManager", + "FuncxScopes", + "LoginManagerProtocol", + "requires_login", +) diff --git a/compute_funcx/sdk/funcx/sdk/web_client.py b/compute_funcx/sdk/funcx/sdk/web_client.py new file mode 100644 index 000000000..73b9965b3 --- /dev/null +++ b/compute_funcx/sdk/funcx/sdk/web_client.py @@ -0,0 +1 @@ +from globus_compute_sdk.sdk.web_client import WebClient as FuncXWebClient # noqa: F401 diff --git a/compute_funcx/sdk/funcx/serialize/__init__.py b/compute_funcx/sdk/funcx/serialize/__init__.py new file mode 100644 index 000000000..205e82e5d --- /dev/null +++ b/compute_funcx/sdk/funcx/serialize/__init__.py @@ -0,0 +1,3 @@ +from globus_compute_sdk.serialize import ComputeSerializer as FuncXSerializer + +__all__ = ("FuncXSerializer",) diff --git a/compute_funcx/sdk/funcx/version.py b/compute_funcx/sdk/funcx/version.py new file mode 100644 index 000000000..1b8ba98ff --- /dev/null +++ b/compute_funcx/sdk/funcx/version.py @@ -0,0 +1,37 @@ +from globus_compute_sdk.errors import VersionMismatch +from packaging.version import Version + +# single source of 
truth for package version, +# see https://packaging.python.org/en/latest/single_source_version/ +__version__ = "2.0.0a4" + +DEPRECATION_FUNCX = """ +The funcX SDK has been renamed to Globus Compute SDK and the new package is +available on PyPI: + https://pypi.org/project/globus-compute-sdk/ + +Please consider upgrading to Globus Compute. More information can be found at: + https://globus-compute.readthedocs.io/en/latest/funcx_upgrade.html +""" + + +def compare_versions( + current: str, min_version: str, *, package_name: str = "funcx" +) -> None: + current_v = Version(current) + min_v = Version(min_version) + + if ( + current_v.is_devrelease + or min_v.is_devrelease + and current_v.release == min_v.release + ): + return + + if current_v < min_v: + raise VersionMismatch( + f"Your version={current} is lower than the " + f"minimum version for {package_name}: {min_version}. " + "Please update. " + f"pip install {package_name}>={min_version}" + ) diff --git a/compute_funcx/sdk/setup.cfg b/compute_funcx/sdk/setup.cfg new file mode 100644 index 000000000..72b0c5590 --- /dev/null +++ b/compute_funcx/sdk/setup.cfg @@ -0,0 +1,15 @@ +[isort] +profile = black +known_first_party = globus-compute-sdk, globus-compute-endpoint, funcx, funcx-endpoint + +[flake8] +# config to be black-compatible +max-line-length = 88 +ignore = E203,W503,W504 + +[mypy] +# strict = true +ignore_missing_imports = true +warn_unreachable = true +warn_no_return = true +exclude = tests diff --git a/compute_funcx/sdk/setup.py b/compute_funcx/sdk/setup.py new file mode 100644 index 000000000..412486140 --- /dev/null +++ b/compute_funcx/sdk/setup.py @@ -0,0 +1,48 @@ +import os +import re + +from setuptools import find_packages, setup + +REQUIRES = [ + "globus-compute-sdk>=2.0.0", +] + + +def parse_version(): + # single source of truth for package version + version_string = "" + version_pattern = re.compile(r'__version__ = "([^"]*)"') + with open(os.path.join("funcx", "version.py")) as f: + for line in f: + match = version_pattern.match(line) + if match: + version_string = match.group(1) + break + if not version_string: + raise RuntimeError("Failed to parse version information") + return version_string + + +setup( + name="funcx", + version=parse_version(), + packages=find_packages(), + description="Globus Compute: High Performance Function Serving for Science", + install_requires=REQUIRES, + extras_require={}, + python_requires=">=3.7", + classifiers=[ + "Development Status :: 3 - Alpha", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Natural Language :: English", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Topic :: Scientific/Engineering", + ], + keywords=["funcX", "FaaS", "Function Serving", "Globus Compute"], + author="The Globus Compute Team", + author_email="support@globus.org", + license="Apache License, Version 2.0", + url="https://github.com/funcx-faas/funcx", +) diff --git a/compute_funcx/sdk/tox.ini b/compute_funcx/sdk/tox.ini new file mode 100644 index 000000000..811d3aeb3 --- /dev/null +++ b/compute_funcx/sdk/tox.ini @@ -0,0 +1,9 @@ +[testenv:publish-release] +skip_install = true +deps = build + twine +# clean the build dir before rebuilding +whitelist_externals = rm +commands_pre = rm -rf dist/ +commands = python -m build + twine upload dist/* diff --git a/funcx_sdk/.coveragerc b/compute_sdk/.coveragerc similarity index 94% rename from funcx_sdk/.coveragerc rename to compute_sdk/.coveragerc index bc06b223f..1a0213d56 100644 --- 
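The `compare_versions` helper above delegates all ordering to `packaging.version`. A few illustrative assertions (not from the repo) show the behavior it depends on, including why the `2.0.0a4` pre-release counts as lower than the eventual `2.0.0`:

```python
from packaging.version import Version

# Pre-releases sort below the final release with the same number:
assert Version("2.0.0a4") < Version("2.0.0")
# ...and the old funcx line sorts below the new major version:
assert Version("1.0.13") < Version("2.0.0")

# Dev releases share a release tuple with their final version; that is the
# condition compare_versions uses to return early instead of raising:
dev, final = Version("2.0.0.dev1"), Version("2.0.0")
assert dev.is_devrelease and dev.release == final.release
```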
a/funcx_sdk/.coveragerc +++ b/compute_sdk/.coveragerc @@ -1,5 +1,5 @@ [run] -include = funcx/* +include = globus_compute_sdk/* [report] show_missing = True diff --git a/compute_sdk/LICENSE b/compute_sdk/LICENSE new file mode 100644 index 000000000..b975e431c --- /dev/null +++ b/compute_sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2019] [The University of Chicago] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/compute_sdk/globus_compute_sdk/__init__.py b/compute_sdk/globus_compute_sdk/__init__.py new file mode 100644 index 000000000..cd5eb0d70 --- /dev/null +++ b/compute_sdk/globus_compute_sdk/__init__.py @@ -0,0 +1,13 @@ +""" Globus Compute : Fast function serving for clouds, clusters and supercomputers. + +""" +from globus_compute_sdk.version import __version__ as _version + +__author__ = "The Globus Compute Team" +__version__ = _version + +from globus_compute_sdk.sdk.client import Client +from globus_compute_sdk.sdk.container_spec import ContainerSpec +from globus_compute_sdk.sdk.executor import Executor + +__all__ = ("Executor", "Client", "ContainerSpec") diff --git a/funcx_sdk/funcx/errors/__init__.py b/compute_sdk/globus_compute_sdk/errors/__init__.py similarity index 70% rename from funcx_sdk/funcx/errors/__init__.py rename to compute_sdk/globus_compute_sdk/errors/__init__.py index bc6b24314..7b17fb2d3 100644 --- a/funcx_sdk/funcx/errors/__init__.py +++ b/compute_sdk/globus_compute_sdk/errors/__init__.py @@ -1,15 +1,15 @@ from .error_types import ( - FuncxError, - FuncxTaskExecutionFailed, + ComputeError, MaxResultSizeExceeded, SerializationError, + TaskExecutionFailed, TaskPending, VersionMismatch, ) __all__ = ( - "FuncxError", - "FuncxTaskExecutionFailed", + "ComputeError", + "TaskExecutionFailed", "MaxResultSizeExceeded", "SerializationError", "TaskPending", diff --git a/funcx_sdk/funcx/errors/error_types.py b/compute_sdk/globus_compute_sdk/errors/error_types.py similarity index 86% rename from funcx_sdk/funcx/errors/error_types.py rename to compute_sdk/globus_compute_sdk/errors/error_types.py index cfa5e7eea..275e6a622 100644 --- a/funcx_sdk/funcx/errors/error_types.py +++ b/compute_sdk/globus_compute_sdk/errors/error_types.py @@ -4,24 +4,24 @@ import time -class FuncxError(Exception): +class ComputeError(Exception): """Base class for all funcx exceptions""" def __str__(self): return self.__repr__() -class VersionMismatch(FuncxError): +class VersionMismatch(ComputeError): """Either client and endpoint version mismatch, or version cannot be retrieved.""" def __init__(self, version_message): self.version_message = version_message def __repr__(self): - return f"FuncX Versioning Issue: {self.version_message}" + return f"Globus Compute Versioning Issue: {self.version_message}" -class SerializationError(FuncxError): +class SerializationError(ComputeError): """Something failed during serialization or deserialization.""" def __init__(self, message): @@ -31,7 +31,7 @@ def __repr__(self): return f"Serialization Error during: {self.message}" -class TaskPending(FuncxError): +class TaskPending(ComputeError): """Task is pending and no result is available yet""" def __init__(self, reason): @@ -56,7 +56,7 @@ def 
__str__(self) -> str: ) -class FuncxTaskExecutionFailed(Exception): +class TaskExecutionFailed(Exception): """ Error result from the remote end, wrapped as an exception object """ diff --git a/funcx_sdk/funcx/sdk/__init__.py b/compute_sdk/globus_compute_sdk/sdk/__init__.py similarity index 100% rename from funcx_sdk/funcx/sdk/__init__.py rename to compute_sdk/globus_compute_sdk/sdk/__init__.py diff --git a/funcx_sdk/funcx/sdk/_environments.py b/compute_sdk/globus_compute_sdk/sdk/_environments.py similarity index 100% rename from funcx_sdk/funcx/sdk/_environments.py rename to compute_sdk/globus_compute_sdk/sdk/_environments.py diff --git a/funcx_sdk/funcx/sdk/asynchronous/__init__.py b/compute_sdk/globus_compute_sdk/sdk/asynchronous/__init__.py similarity index 100% rename from funcx_sdk/funcx/sdk/asynchronous/__init__.py rename to compute_sdk/globus_compute_sdk/sdk/asynchronous/__init__.py diff --git a/funcx_sdk/funcx/sdk/asynchronous/funcx_future.py b/compute_sdk/globus_compute_sdk/sdk/asynchronous/compute_future.py similarity index 87% rename from funcx_sdk/funcx/sdk/asynchronous/funcx_future.py rename to compute_sdk/globus_compute_sdk/sdk/asynchronous/compute_future.py index f02540951..93db33a6f 100644 --- a/funcx_sdk/funcx/sdk/asynchronous/funcx_future.py +++ b/compute_sdk/globus_compute_sdk/sdk/asynchronous/compute_future.py @@ -2,7 +2,7 @@ from concurrent.futures import Future -class FuncXFuture(Future): +class ComputeFuture(Future): """ Extend `concurrent.futures.Future`_ to include an optional task UUID. @@ -12,7 +12,7 @@ class FuncXFuture(Future): task_id: t.Optional[str] """The UUID for the task behind this Future. In batch mode, this will not be populated immediately, but will appear later when the task is - submitted to the FuncX services.""" + submitted to the Globus Compute services.""" def __init__(self, task_id: t.Optional[str] = None): super().__init__() diff --git a/funcx_sdk/funcx/sdk/asynchronous/funcx_task.py b/compute_sdk/globus_compute_sdk/sdk/asynchronous/compute_task.py similarity index 53% rename from funcx_sdk/funcx/sdk/asynchronous/funcx_task.py rename to compute_sdk/globus_compute_sdk/sdk/asynchronous/compute_task.py index 4af1fb3e6..23de98925 100644 --- a/funcx_sdk/funcx/sdk/asynchronous/funcx_task.py +++ b/compute_sdk/globus_compute_sdk/sdk/asynchronous/compute_task.py @@ -1,9 +1,9 @@ import asyncio -class FuncXTask(asyncio.Future): +class ComputeTask(asyncio.Future): """ - Represents a submitted funcX task with an asychio wrapper + Represents a submitted Globus Compute task with an asyncio wrapper """ def __init__(self, task_id): @@ -11,17 +11,19 @@ def __init__(self, task_id): Parameters ---------- task_id : uuid str - The uuid of the funcX task this instance is shadowing + The uuid of the Globus Compute task this instance is shadowing """ super().__init__() self.task_id = task_id def __str__(self): - return "FuncX Task ID " + self.task_id + return "Globus Compute Task ID " + self.task_id async def get_result(self): """ - Coroutine to wait on the funcX task to complete and then return the result + Coroutine waiting for the Globus Compute task to complete and then + return the result + :return: result : Any """ diff --git a/funcx_sdk/funcx/sdk/asynchronous/ws_polling_task.py b/compute_sdk/globus_compute_sdk/sdk/asynchronous/ws_polling_task.py similarity index 94% rename from funcx_sdk/funcx/sdk/asynchronous/ws_polling_task.py rename to compute_sdk/globus_compute_sdk/sdk/asynchronous/ws_polling_task.py index 209877d21..ce868aa0d 100644 ---
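The `ComputeFuture` rename above is a thin extension of `concurrent.futures.Future` whose task UUID may only arrive after batch submission. A minimal sketch of that contract follows; the class name and UUID are illustrative, not from the repo:

```python
import typing as t
from concurrent.futures import Future


class SketchFuture(Future):
    """A Future carrying an optional task UUID, set after submission."""

    def __init__(self, task_id: t.Optional[str] = None):
        super().__init__()
        self.task_id = task_id


fut = SketchFuture()
assert fut.task_id is None  # batch mode: id unknown at creation time
fut.task_id = "00000000-0000-0000-0000-000000000000"  # later, from the service
fut.set_result("hello")
assert fut.result() == "hello"
```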
a/funcx_sdk/funcx/sdk/asynchronous/ws_polling_task.py +++ b/compute_sdk/globus_compute_sdk/sdk/asynchronous/ws_polling_task.py @@ -9,16 +9,15 @@ # import from `websockets.client`, see: # https://github.com/aaugustin/websockets/issues/940 import websockets.client +from globus_compute_sdk.errors import TaskExecutionFailed +from globus_compute_sdk.sdk.asynchronous.compute_future import ComputeFuture +from globus_compute_sdk.sdk.asynchronous.compute_task import ComputeTask from websockets.exceptions import ( ConnectionClosedOK, InvalidHandshake, InvalidStatusCode, ) -from funcx.errors import FuncxTaskExecutionFailed -from funcx.sdk.asynchronous.funcx_future import FuncXFuture -from funcx.sdk.asynchronous.funcx_task import FuncXTask - log = logging.getLogger(__name__) # Add extra allowance for result wrappers @@ -27,7 +26,7 @@ class WebSocketPollingTask: - """The WebSocketPollingTask is used by the FuncXExecutor and the FuncXClient + """The WebSocketPollingTask is used by the Executor and the Client to asynchronously listen to a stream of results. It uses a synchronized counter to identify when there are no more tasks and exit to avoid blocking the main thread from exiting. @@ -47,7 +46,7 @@ def __init__( ========== funcx_client : client object - Instance of FuncXClient to be used by the executor + Instance of Client to be used by the executor loop : event loop The asnycio event loop that the WebSocket client will run on @@ -87,7 +86,7 @@ def __init__( # new thread asyncio.set_event_loop(self.loop) self.task_group_ids_queue: asyncio.Queue[str] = asyncio.Queue() - self.pending_tasks: t.Dict[str, FuncXTask] = {} + self.pending_tasks: t.Dict[str, ComputeTask] = {} self.unknown_results: t.Dict[str, t.Any] = {} self.closed_by_main_thread = False @@ -200,13 +199,13 @@ async def handle_incoming(self, pending_futures, auto_close=False) -> bool: log.info("WebSocket connection closed by main thread") return True - async def set_result(self, task_fut: FuncXFuture, task_data: t.Dict): + async def set_result(self, task_fut: ComputeFuture, task_data: t.Dict): """Sets the result of a future with given task_id in the pending_futures map, then decrement the atomic counter and close the WebSocket connection if needed Parameters ---------- - task_fut : FuncXFuture + task_fut : ComputeFuture Task future for which to parse and set task_data @@ -227,7 +226,7 @@ async def set_result(self, task_fut: FuncXFuture, task_data: t.Dict): ) elif "exception" in task_data: task_fut.set_exception( - FuncxTaskExecutionFailed( + TaskExecutionFailed( task_data["exception"], task_data["completion_t"] ) ) @@ -265,10 +264,11 @@ def put_task_group_id(self, task_group_id): self.running_task_group_ids.add(task_group_id) self.task_group_ids_queue.put_nowait(task_group_id) - def add_task(self, task: FuncXTask): + def add_task(self, task: ComputeTask): """ - Add a funcX task - :param task: FuncXTask + Add a Globus Compute task + + :param task: ComputeTask Task to be added """ self.pending_tasks[task.task_id] = task diff --git a/funcx_sdk/funcx/sdk/batch.py b/compute_sdk/globus_compute_sdk/sdk/batch.py similarity index 92% rename from funcx_sdk/funcx/sdk/batch.py rename to compute_sdk/globus_compute_sdk/sdk/batch.py index 106703163..5a2924a13 100644 --- a/funcx_sdk/funcx/sdk/batch.py +++ b/compute_sdk/globus_compute_sdk/sdk/batch.py @@ -2,11 +2,11 @@ import typing as t -from funcx.serialize import FuncXSerializer +from globus_compute_sdk.serialize import ComputeSerializer class Batch: - """Utility class for creating batch submission in 
funcX""" + """Utility class for creating batch submission in Globus Compute""" def __init__(self, task_group_id: str | None = None, create_websocket_queue=False): """ @@ -17,7 +17,7 @@ def __init__(self, task_group_id: str | None = None, create_websocket_queue=Fals UUID indicating the task group that this batch belongs to """ self.tasks: list[tuple[str, str, str]] = [] - self.fx_serializer = FuncXSerializer() + self.fx_serializer = ComputeSerializer() self.task_group_id = task_group_id self.create_websocket_queue = create_websocket_queue diff --git a/funcx_sdk/funcx/sdk/client.py b/compute_sdk/globus_compute_sdk/sdk/client.py similarity index 91% rename from funcx_sdk/funcx/sdk/client.py rename to compute_sdk/globus_compute_sdk/sdk/client.py index 26c939423..eacc14b4c 100644 --- a/funcx_sdk/funcx/sdk/client.py +++ b/compute_sdk/globus_compute_sdk/sdk/client.py @@ -8,17 +8,21 @@ import uuid import warnings -from funcx.errors import FuncxTaskExecutionFailed, SerializationError, TaskPending -from funcx.sdk._environments import ( +from globus_compute_sdk.errors import ( + SerializationError, + TaskExecutionFailed, + TaskPending, +) +from globus_compute_sdk.sdk._environments import ( get_web_service_url, get_web_socket_url, urls_might_mismatch, ) -from funcx.sdk.asynchronous.funcx_task import FuncXTask -from funcx.sdk.asynchronous.ws_polling_task import WebSocketPollingTask -from funcx.sdk.web_client import FunctionRegistrationData -from funcx.serialize import FuncXSerializer -from funcx.version import __version__, compare_versions +from globus_compute_sdk.sdk.asynchronous.compute_task import ComputeTask +from globus_compute_sdk.sdk.asynchronous.ws_polling_task import WebSocketPollingTask +from globus_compute_sdk.sdk.web_client import FunctionRegistrationData +from globus_compute_sdk.serialize import ComputeSerializer +from globus_compute_sdk.version import __version__, compare_versions from .batch import Batch from .login_manager import LoginManager, LoginManagerProtocol, requires_login @@ -28,10 +32,10 @@ _FUNCX_HOME = os.path.join("~", ".funcx") -class FuncXClient: - """Main class for interacting with the funcX service +class Client: + """Main class for interacting with the Globus Compute service - Holds helper operations for performing common tasks with the funcX service. + Holds helper operations for performing common tasks with the Globus Compute service. """ FUNCX_SDK_CLIENT_ID = os.environ.get( @@ -80,13 +84,13 @@ def __init__( results_ws_uri: str For internal use only. The address of the websocket service. - DEPRECATED - use FuncXExecutor instead. + DEPRECATED - use Executor instead. warn_about_url_mismatch: bool For internal use only. If true, a warning is logged if funcx_service_address and results_ws_uri appear to point to different environments. - DEPRECATED - use FuncXExecutor instead. + DEPRECATED - use Executor instead. do_version_check: bool Set to ``False`` to skip the version compatibility check on client @@ -98,7 +102,7 @@ def __init__( Currently only impacts the run method. DEPRECATED - this was an early attempt at asynchronous result gathering. - Use the FuncXExecutor instead. + Use the Executor instead. Default: False @@ -107,14 +111,14 @@ def __init__( event loop instance. If None, then we will access asyncio.get_event_loop() DEPRECATED - part of an early attempt at asynchronous result gathering. - Use the FuncXExecutor instead. + Use the Executor instead. Default: None task_group_id: str|uuid.UUID - Set the TaskGroup ID (a UUID) for this FuncXClient instance. 
+ Set the TaskGroup ID (a UUID) for this Client instance. Typically, one uses this to submit new tasks to an existing - session or to reestablish FuncXExecutor futures. + session or to reestablish Executor futures. Default: None (will be auto generated) Keyword arguments are the same as for BaseClient. @@ -155,10 +159,10 @@ def __init__( self.login_manager = LoginManager(environment=environment) self.login_manager.ensure_logged_in() - self.web_client = self.login_manager.get_funcx_web_client( + self.web_client = self.login_manager.get_web_client( base_url=funcx_service_address ) - self.fx_serializer = FuncXSerializer() + self.fx_serializer = ComputeSerializer() self.funcx_service_address = funcx_service_address @@ -207,7 +211,7 @@ def version_check(self, endpoint_version: str | None = None) -> None: compare_versions(__version__, min_sdk_version) if endpoint_version is not None: compare_versions( - endpoint_version, min_ep_version, package_name="funcx-endpoint" + endpoint_version, min_ep_version, package_name="globus-compute-endpoint" ) def logout(self): @@ -223,7 +227,7 @@ def _update_task_table(self, return_msg: str | t.Dict, task_id: str): ---------- return_msg : str | t.Dict - Return message received from the funcx service + Return message received from the Globus Compute service task_id : str task id string """ @@ -248,7 +252,7 @@ def _update_task_table(self, return_msg: str | t.Dict, task_id: str): else: status.update({"result": r_obj, "completion_t": completion_t}) elif "exception" in r_dict: - raise FuncxTaskExecutionFailed(r_dict["exception"], completion_t) + raise TaskExecutionFailed(r_dict["exception"], completion_t) else: raise NotImplementedError("unreachable") @@ -257,7 +261,7 @@ def _update_task_table(self, return_msg: str | t.Dict, task_id: str): @requires_login def get_task(self, task_id): - """Get a funcX task. + """Get a Globus Compute task. 
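For readers tracking the rename at the call site, a minimal before/after sketch (the endpoint and function IDs below are hypothetical placeholders; ``run`` and ``get_result`` keep the signatures defined in this file)::

    # before (funcx < 2.0): from funcx import FuncXClient; fxc = FuncXClient()
    from globus_compute_sdk import Client

    gcc = Client()
    task_id = gcc.run(endpoint_id="<ep_id>", function_id="<fn_id>")
    result = gcc.get_result(task_id)  # may raise TaskPending while the task is pending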
Parameters ---------- @@ -280,7 +284,7 @@ def get_task(self, task_id): @requires_login def get_result(self, task_id): - """Get the result of a funcX task + """Get the result of a Globus Compute task Parameters ---------- @@ -363,7 +367,7 @@ def run(self, *args, endpoint_id=None, function_id=None, **kwargs) -> str: task_id : str UUID string that identifies the task if asynchronous is False - funcX Task: asyncio.Task + Globus Compute Task: asyncio.Task A future that will eventually resolve into the function's result if asynchronous is True """ @@ -378,7 +382,7 @@ def run(self, *args, endpoint_id=None, function_id=None, **kwargs) -> str: def create_batch(self, task_group_id=None, create_websocket_queue=False) -> Batch: """ - Create a Batch instance to handle batch submission in funcX + Create a Batch instance to handle batch submission in Globus Compute Parameters ---------- @@ -407,7 +411,7 @@ def create_batch(self, task_group_id=None, create_websocket_queue=False) -> Batc @requires_login def batch_run(self, batch) -> t.List[str]: - """Initiate a batch of tasks to funcX + """Initiate a batch of tasks to Globus Compute Parameters ---------- @@ -422,7 +426,7 @@ def batch_run(self, batch) -> t.List[str]: data = batch.prepare() - # Send the data to funcX + # Send the data to Globus Compute r = self.web_client.submit(data) task_uuids: t.List[str] = [] @@ -434,20 +438,20 @@ def batch_run(self, batch) -> t.List[str]: # ideal, as it will raise any error in the multi-response, # but it will do until batch_run is deprecated in favor of Executer # Note that some errors may already be caught and raised - # by funcx.sdk.client.request as GlobusAPIError + # by globus_compute_sdk.sdk.client.request as GlobusAPIError # Checking for 'Failed' is how FuncxResponseError.unpack # originally checked for errors. # All errors should have 'reason' but just in case error_reason = result.get("reason", "Unknown execution failure") - raise FuncxTaskExecutionFailed(error_reason) + raise TaskExecutionFailed(error_reason) if self.asynchronous: task_group_id = r["task_group_id"] asyncio_tasks = [] for task_id in task_uuids: - funcx_task = FuncXTask(task_id) + funcx_task = ComputeTask(task_id) asyncio_task = self.loop.create_task(funcx_task.get_result()) asyncio_tasks.append(asyncio_task) @@ -465,7 +469,7 @@ def register_endpoint( metadata=None, multi_tenant=False, ): - """Register an endpoint with the funcX service. + """Register an endpoint with the Globus Compute service. Parameters ---------- @@ -499,8 +503,8 @@ def get_result_amqp_url(self) -> dict[str, str]: @requires_login def get_containers(self, name, description=None): """ - Register a DLHub endpoint with the funcX service and get the containers to - launch. + Register a DLHub endpoint with the Globus Compute service and get + the containers to launch. Parameters ---------- @@ -599,7 +603,7 @@ def register_function( group=None, searchable=None, ) -> str: - """Register a function code with the funcX service. + """Register a function code with the Globus Compute service. Parameters ---------- @@ -608,7 +612,7 @@ def register_function( function_name : str The entry point (function name) of the function. 
Default: None container_uuid : str - Container UUID from registration with funcX + Container UUID from registration with Globus Compute description : str Description of the file public : bool @@ -647,7 +651,7 @@ def register_function( @requires_login def register_container(self, location, container_type, name="", description=""): - """Register a container with the funcX service. + """Register a container with the Globus Compute service. Parameters ---------- @@ -690,7 +694,7 @@ def build_container(self, container_spec): Parameters ---------- - container_spec : funcx.sdk.container_spec.ContainerSpec + container_spec : globus_compute_sdk.sdk.container_spec.ContainerSpec Complete specification of what goes into the container Returns diff --git a/funcx_sdk/funcx/sdk/container_spec.py b/compute_sdk/globus_compute_sdk/sdk/container_spec.py similarity index 94% rename from funcx_sdk/funcx/sdk/container_spec.py rename to compute_sdk/globus_compute_sdk/sdk/container_spec.py index fc901fc68..cd1c9e6d9 100644 --- a/funcx_sdk/funcx/sdk/container_spec.py +++ b/compute_sdk/globus_compute_sdk/sdk/container_spec.py @@ -25,9 +25,9 @@ def __init__( Parameters ---------- name : str - Name of this container to be used inside funcx + Name of this container to be used inside Globus Compute description : str - Description of the container inside funcx + Description of the container inside Globus Compute apt : List[str] List of Ubuntu library packages to install in container pip : List[str] diff --git a/funcx_sdk/funcx/sdk/executor.py b/compute_sdk/globus_compute_sdk/sdk/executor.py similarity index 91% rename from funcx_sdk/funcx/sdk/executor.py rename to compute_sdk/globus_compute_sdk/sdk/executor.py index 77db755a1..77052ae92 100644 --- a/funcx_sdk/funcx/sdk/executor.py +++ b/compute_sdk/globus_compute_sdk/sdk/executor.py @@ -21,13 +21,12 @@ class InvalidStateError(Exception): import pika -from funcx_common import messagepack -from funcx_common.messagepack.message_types import Result - -from funcx.errors import FuncxTaskExecutionFailed -from funcx.sdk.asynchronous.funcx_future import FuncXFuture -from funcx.sdk.client import FuncXClient -from funcx.sdk.utils import chunk_by +from globus_compute_common import messagepack +from globus_compute_common.messagepack.message_types import Result +from globus_compute_sdk.errors import TaskExecutionFailed +from globus_compute_sdk.sdk.asynchronous.compute_future import ComputeFuture +from globus_compute_sdk.sdk.client import Client +from globus_compute_sdk.sdk.utils import chunk_by log = logging.getLogger(__name__) @@ -41,15 +40,15 @@ class InvalidStateError(Exception): _REGISTERED_FXEXECUTORS: dict[int, t.Any] = {} -def __funcxexecutor_atexit(): +def __executor_atexit(): threading.main_thread().join() to_shutdown = list(_REGISTERED_FXEXECUTORS.values()) while to_shutdown: - fxe = to_shutdown.pop() - fxe.shutdown() + gce = to_shutdown.pop() + gce.shutdown() -threading.Thread(target=__funcxexecutor_atexit).start() +threading.Thread(target=__executor_atexit).start() class TaskSubmissionInfo: @@ -79,7 +78,7 @@ def __repr__(self): class AtomicController: - """This is used to synchronize between the FuncXExecutor which starts + """This is used to synchronize between the Executor which starts WebSocketPollingTasks and the WebSocketPollingTask which closes itself when there are 0 tasks. 
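The error-class renames in errors/__init__.py above carry through to downstream exception handling; a minimal sketch, reusing ``gcc`` and ``task_id`` from the client sketch earlier::

    from globus_compute_sdk.errors import TaskExecutionFailed, TaskPending

    try:
        result = gcc.get_result(task_id)
    except TaskPending:
        pass  # no result yet; poll again later
    except TaskExecutionFailed:  # formerly FuncxTaskExecutionFailed
        raise  # the function raised remotely; details are in the exception text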
""" @@ -116,11 +115,11 @@ def __repr__(self): return f"AtomicController value:{self._value}" -class FuncXExecutor(concurrent.futures.Executor): +class Executor(concurrent.futures.Executor): """ - Extend Python's |Executor|_ base class for funcX's purposes. + Extend Python's |Executor|_ base class for Globus Compute's purposes. - .. |Executor| replace:: ``FuncXExecutor`` + .. |Executor| replace:: ``Executor`` .. _Executor: https://docs.python.org/3/library/concurrent.futures.html#executor-objects """ # noqa @@ -128,7 +127,7 @@ def __init__( self, endpoint_id: str | None = None, container_id: str | None = None, - funcx_client: FuncXClient | None = None, + funcx_client: Client | None = None, task_group_id: str | None = None, label: str = "", batch_size: int = 128, @@ -137,7 +136,7 @@ def __init__( """ :param endpoint_id: id of the endpoint to which to submit tasks :param container_id: id of the container in which to execute tasks - :param funcx_client: instance of FuncXClient to be used by the + :param funcx_client: instance of Client to be used by the executor. If not provided, the executor will instantiate one with default arguments. :param task_group_id: The Task Group to which to associate tasks. If not set, @@ -162,7 +161,7 @@ def __init__( raise TypeError(msg) if not funcx_client: - funcx_client = FuncXClient() + funcx_client = Client() self.funcx_client = funcx_client self.endpoint_id = endpoint_id @@ -174,7 +173,7 @@ def __init__( self._task_counter: int = 0 self._task_group_id: str = task_group_id or str(uuid.uuid4()) self._tasks_to_send: queue.Queue[ - tuple[FuncXFuture, TaskSubmissionInfo] | tuple[None, None] + tuple[ComputeFuture, TaskSubmissionInfo] | tuple[None, None] ] = queue.Queue() self._function_registry: dict[tuple[t.Callable, str | None], str] = {} @@ -207,13 +206,13 @@ def task_group_id(self) -> str: Must be a string. Set by simple assignment:: - fxe = FuncXExecutor(endpoint_id="...") - fxe.task_group_id = "Some-stored-id" + gce = Executor(endpoint_id="...") + gce.task_group_id = "Some-stored-id" This is typically used when reattaching to a previously initiated set of tasks. See `reload_tasks()`_ for more information. - [default: ``None``, which translates to the FuncXClient task group id] + [default: ``None``, which translates to the Client task group id] """ return self._task_group_id @@ -233,7 +232,7 @@ def register_function( All function execution submissions (i.e., ``.submit()``) communicate which pre-registered function to execute on the endpoint by the function's identifier, the ``function_id``. This method makes the appropriate API - call to the funcX web services to first register the task function, and + call to the Globus Compute web services to first register the task function, and then stores the returned ``function_id`` in the Executor's cache. In the standard workflow, ``.submit()`` will automatically handle invoking @@ -254,7 +253,7 @@ def register_function( :param function_id: if specified, associate the ``function_id`` to the ``fn`` immediately, short-circuiting the upstream registration call. :param func_register_kwargs: all other keyword arguments are passed to - the ``FuncXClient.register_function()``. + the ``Client.register_function()``. :returns: the function's ``function_id`` string, as returned by registration upstream """ @@ -294,14 +293,14 @@ def submit(self, fn, *args, **kwargs): with the given arguments. 
Schedules the callable to be executed as ``fn(*args, **kwargs)`` and - returns a FuncXFuture instance representing the execution of the + returns a ComputeFuture instance representing the execution of the callable. Example use:: >>> def add(a: int, b: int) -> int: return a + b - >>> fxe = FuncXExecutor(endpoint_id="some-ep-id") - >>> fut = fxe.submit(add, 1, 2) + >>> gce = Executor(endpoint_id="some-ep-id") + >>> fut = gce.submit(add, 1, 2) >>> fut.result() # wait (block) until result is received from remote 3 @@ -311,8 +310,8 @@ def submit(self, fn, *args, **kwargs): :param kwargs: keyword arguments (if any) as required to execute the function :returns: a future object that will receive a ``.task_id`` when the - funcX Web Service acknowledges receipt, and eventually will have - a ``.result()`` when the funcX web services receive and stream it. + Globus Compute Web Service acknowledges receipt, and eventually will have + a ``.result()`` when the Globus Compute web services receive and stream it. """ if self._stopped: err_fmt = "%s is shutdown; no new functions may be executed" @@ -336,18 +335,18 @@ def submit_to_registered_function( """ Request an execution of an already registered function. - This method supports use of public functions with the FuncXExecutor, or + This method supports use of public functions with the Executor, or knowledge of an already registered function. An example use might be:: # pre_registration.py - from funcx import FuncXExecutor + from globus_compute_sdk import Executor def some_processor(*args, **kwargs): # ... function logic ... return ["some", "result"] - fxe = FuncXExecutor() - fn_id = fxe.register_function(some_processor) + gce = Executor() + fn_id = gce.register_function(some_processor) print(f"Function registered successfully.\\nFunction ID: {fn_id}") # Example output: @@ -359,12 +358,12 @@ def some_processor(*args, **kwargs): the function id is just a string. One could substitute for a publicly available function. For instance, ``b0a5d1a0-2b22-4381-b899-ba73321e41e0`` is a "well-known" uuid for the "Hello, World!" function (same as the example in - the FuncX tutorial), which is publicly available:: + the Globus Compute tutorial), which is publicly available:: - from funcx import FuncXExecutor + from globus_compute_sdk import Executor fn_id = "b0a5d1a0-2b22-4381-b899-ba73321e41e0" # public; "Hello World" - with FuncXExecutor(endpoint_id="your-endpoint-id") as fxe: + with Executor(endpoint_id="your-endpoint-id") as fxe: futs = [ fxe.submit_to_registered_function(function_id=fn_id) for i in range(5) @@ -379,7 +378,7 @@ def some_processor(*args, **kwargs): :param kwargs: keyword arguments (if any) as required to execute the function :returns: a future object that (eventually) will have a ``.result()`` - when the funcX web services receive and stream it. + when the Globus Compute web services receive and stream it. """ if self._stopped: err_fmt = "%s is shutdown; no new functions may be executed" @@ -389,8 +388,8 @@ def some_processor(*args, **kwargs): msg = ( "No endpoint_id set. 
Did you forget to set it at construction?\n" " Hint:\n\n" - " fxe = FuncXExecutor(endpoint_id=<ep_id>)\n" - " fxe.endpoint_id = <ep_id> # alternative" + " gce = Executor(endpoint_id=<ep_id>)\n" + " gce.endpoint_id = <ep_id> # alternative" ) self.shutdown(wait=False, cancel_futures=True) raise ValueError(msg) @@ -410,13 +409,13 @@ def some_processor(*args, **kwargs): kwargs=kwargs, ) - fut = FuncXFuture() + fut = ComputeFuture() self._tasks_to_send.put((fut, task)) return fut def map(self, fn: t.Callable, *iterables, timeout=None, chunksize=1) -> t.Iterator: """ - FuncX does not currently implement the `.map()`_ method of the `Executor + Globus Compute does not currently implement the `.map()`_ method of the `Executor interface`_. In a naive implementation, this method would merely be syntactic sugar for bulk use of the ``.submit()`` method. For example:: @@ -436,7 +435,7 @@ def map(fxexec, fn, *fn_args_kwargs): """ # noqa raise NotImplementedError() - def reload_tasks(self) -> t.Iterable[FuncXFuture]: + def reload_tasks(self) -> t.Iterable[ComputeFuture]: """ .. _reload_tasks(): @@ -473,11 +472,11 @@ def reload_tasks(self) -> t.Iterable[FuncXFuture]: # step 3: create the associated set of futures task_ids: list[str] = [task["id"] for task in r.get("tasks", [])] - futures: list[FuncXFuture] = [] + futures: list[ComputeFuture] = [] if task_ids: # Complete the futures that already have results. - pending: list[FuncXFuture] = [] + pending: list[ComputeFuture] = [] deserialize = self.funcx_client.fx_serializer.deserialize chunk_size = 1024 num_chunks = len(task_ids) // chunk_size + 1 @@ -496,7 +495,7 @@ def reload_tasks(self) -> t.Iterable[FuncXFuture]: res = self.funcx_client.web_client.get_batch_status(id_chunk) for task_id, task in res.data.get("results", {}).items(): - fut = FuncXFuture(task_id) + fut = ComputeFuture(task_id) futures.append(fut) completed_t = task.get("completion_t") if not completed_t: @@ -506,12 +505,12 @@ def reload_tasks(self) -> t.Iterable[FuncXFuture]: if task.get("status") == "success": fut.set_result(deserialize(task["result"])) else: - exc = FuncxTaskExecutionFailed( + exc = TaskExecutionFailed( task["exception"], completed_t ) fut.set_exception(exc) except Exception as exc: - funcx_err = FuncxTaskExecutionFailed( + funcx_err = TaskExecutionFailed( "Failed to set result or exception" ) funcx_err.__cause__ = exc @@ -572,9 +571,9 @@ def _task_submitter_impl(self) -> None: "%s: task submission thread started (%s)", self, threading.get_ident() ) to_send = self._tasks_to_send # cache lookup - futs: list[FuncXFuture] = [] # for mypy/the exception branch + futs: list[ComputeFuture] = [] # for mypy/the exception branch try: - fut: FuncXFuture | None = FuncXFuture() # just start the loop; please + fut: ComputeFuture | None = ComputeFuture() # just start the loop; please while fut is not None: futs = [] tasks: list[TaskSubmissionInfo] = [] @@ -597,7 +596,7 @@ def _task_submitter_impl(self) -> None: if not tasks: continue - log.info(f"Submitting tasks to funcX: {len(tasks)}") + log.info(f"Submitting tasks to Globus Compute: {len(tasks)}") self._submit_tasks(futs, tasks) with self._shutdown_lock: @@ -662,12 +661,12 @@ def _task_submitter_impl(self) -> None: pass log.debug("%s: task submission thread complete", self) - def _submit_tasks(self, futs: list[FuncXFuture], tasks: list[TaskSubmissionInfo]): + def _submit_tasks(self, futs: list[ComputeFuture], tasks: list[TaskSubmissionInfo]): """ Submit a batch of tasks to the webservice, destined for self.endpoint_id.
Upon success, update the futures with their associated task_id. - :param futs: a list of FuncXFutures; will have their task_id attribute + :param futs: a list of ComputeFutures; will have their task_id attribute set when function completes successfully. :param tasks: a list of tasks to submit upstream in a batch. """ @@ -677,12 +676,12 @@ def _submit_tasks(self, futs: list[FuncXFuture], tasks: list[TaskSubmissionInfo] ) for task in tasks: batch.add(task.function_id, task.endpoint_id, task.args, task.kwargs) - log.debug("Added task to funcX batch: %s", task) + log.debug("Added task to Globus Compute batch: %s", task) try: batch_tasks = self.funcx_client.batch_run(batch) except Exception: - log.error(f"Error submitting {len(tasks)} tasks to funcX") + log.error(f"Error submitting {len(tasks)} tasks to Globus Compute") raise self.task_count_submitted += len(batch_tasks) @@ -699,17 +698,17 @@ def _submit_tasks(self, futs: list[FuncXFuture], tasks: list[TaskSubmissionInfo] class _ResultWatcher(threading.Thread): """ _ResultWatcher is an internal SDK class meant for consumption by the - FuncXExecutor. It is a standard async AMQP consumer implementation + Executor. It is a standard async AMQP consumer implementation using the Pika library that matches futures from the Executor against - results received from the funcX hosted services. + results received from the Globus Compute hosted services. Expected usage:: - rw = _ResultWatcher(self) # assert isinstance(self, FuncXExecutor) + rw = _ResultWatcher(self) # assert isinstance(self, Executor) rw.start() - # rw is its own thread; it will use the FuncXClient attached to the - # FuncXExecutor to acquire AMQP credentials, and then will open a + # rw is its own thread; it will use the Client attached to the + # Executor to acquire AMQP credentials, and then will open a # connection to the AMQP service. rw.watch_for_task_results(some_list_of_futures) @@ -718,7 +717,7 @@ class _ResultWatcher(threading.Thread): will opportunistically shutdown; the caller must handle this scenario if new futures arrive, and create a new _ResultWatcher instance. - :param funcx_executor: A FuncXExecutor instance + :param funcx_executor: A Executor instance :param poll_period_s: [default: 0.5] how frequently to check for and handle events. For example, if the thread should stop due to user request or if there are results to match. @@ -736,7 +735,7 @@ class ShuttingDownError(Exception): def __init__( self, - funcx_executor: FuncXExecutor, + funcx_executor: Executor, poll_period_s=0.5, connect_attempt_limit=5, channel_close_window_s=10, @@ -754,7 +753,7 @@ def __init__( self._queue_prefix = "" - self._open_futures: dict[str, FuncXFuture] = {} + self._open_futures: dict[str, ComputeFuture] = {} self._received_results: dict[str, tuple[BasicProperties, Result]] = {} self._open_futures_empty = threading.Event() @@ -880,13 +879,13 @@ def _stopper_thread(): if wait: join_thread.join() - def watch_for_task_results(self, futures: list[FuncXFuture]) -> int: + def watch_for_task_results(self, futures: list[ComputeFuture]) -> int: """ - Add list of FuncXFutures to internal watch list. + Add list of ComputeFutures to internal watch list. Updates the thread's dictionary of futures that will be resolved when upstream sends associated results. 
The internal dictionary is keyed - on the FuncXFuture.task_id attribute, but this method does not verify + on the ComputeFuture.task_id attribute, but this method does not verify that it is set -- it is up to the caller to ensure the future is initialized fully. In particular, if a task_id is not set, it will not be added to the watch list. @@ -942,7 +941,7 @@ def _match_results_to_futures(self): if res.is_error: fut.set_exception( - FuncxTaskExecutionFailed(res.data, str(props.timestamp or 0)) + TaskExecutionFailed(res.data, str(props.timestamp or 0)) ) else: try: diff --git a/funcx_sdk/funcx/sdk/login_manager/__init__.py b/compute_sdk/globus_compute_sdk/sdk/login_manager/__init__.py similarity index 70% rename from funcx_sdk/funcx/sdk/login_manager/__init__.py rename to compute_sdk/globus_compute_sdk/sdk/login_manager/__init__.py index bdfb261c9..4381cd2c1 100644 --- a/funcx_sdk/funcx/sdk/login_manager/__init__.py +++ b/compute_sdk/globus_compute_sdk/sdk/login_manager/__init__.py @@ -1,10 +1,10 @@ from .decorators import requires_login -from .manager import FuncxScopes, LoginManager +from .manager import ComputeScopes, LoginManager from .protocol import LoginManagerProtocol __all__ = ( "LoginManager", - "FuncxScopes", + "ComputeScopes", "LoginManagerProtocol", "requires_login", ) diff --git a/funcx_sdk/funcx/sdk/login_manager/client_login.py b/compute_sdk/globus_compute_sdk/sdk/login_manager/client_login.py similarity index 93% rename from funcx_sdk/funcx/sdk/login_manager/client_login.py rename to compute_sdk/globus_compute_sdk/sdk/login_manager/client_login.py index a15985fa0..dcc5b542d 100644 --- a/funcx_sdk/funcx/sdk/login_manager/client_login.py +++ b/compute_sdk/globus_compute_sdk/sdk/login_manager/client_login.py @@ -1,5 +1,5 @@ """ -Logic for using client identities with the funcX SDK. +Logic for using client identities with the Globus Compute SDK. 
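A sketch of the client-identity flow this module supports; ``FUNCX_SDK_CLIENT_ID`` appears in globus_auth.py above, while the secret variable name is an assumption patterned on it::

    import os

    # export a client identity before using the SDK
    os.environ["FUNCX_SDK_CLIENT_ID"] = "<client-uuid>"        # hypothetical value
    os.environ["FUNCX_SDK_CLIENT_SECRET"] = "<client-secret>"  # assumed variable name

    from globus_compute_sdk.sdk.login_manager.client_login import is_client_login

    assert is_client_login()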
This is based on the Globus CLI client login: https://github.com/globus/globus-cli/blob/main/src/globus_cli/login_manager/client_login.py @@ -24,7 +24,7 @@ def _get_client_creds_from_env() -> tuple[str | None, str | None]: def is_client_login() -> bool: """ Return True if the correct env variables have been set to use a - client identity with the funcX SDK + client identity with the Globus Compute SDK """ client_id, client_secret = _get_client_creds_from_env() diff --git a/funcx_sdk/funcx/sdk/login_manager/decorators.py b/compute_sdk/globus_compute_sdk/sdk/login_manager/decorators.py similarity index 91% rename from funcx_sdk/funcx/sdk/login_manager/decorators.py rename to compute_sdk/globus_compute_sdk/sdk/login_manager/decorators.py index f2e9026c9..0285c6a42 100644 --- a/funcx_sdk/funcx/sdk/login_manager/decorators.py +++ b/compute_sdk/globus_compute_sdk/sdk/login_manager/decorators.py @@ -23,7 +23,7 @@ def wrapper(self, *args, **kwargs): ) self.login_manager.run_login_flow() # Initiate a new web client with updated authorizer - self.web_client = self.login_manager.get_funcx_web_client( + self.web_client = self.login_manager.get_web_client( base_url=self.funcx_service_address ) return func(self, *args, **kwargs) diff --git a/compute_sdk/globus_compute_sdk/sdk/login_manager/globus_auth.py b/compute_sdk/globus_compute_sdk/sdk/login_manager/globus_auth.py new file mode 100644 index 000000000..b964b0ddd --- /dev/null +++ b/compute_sdk/globus_compute_sdk/sdk/login_manager/globus_auth.py @@ -0,0 +1,16 @@ +import os + +import globus_sdk + + +def internal_auth_client(): + """ + This is the client that represents the Globus Compute application itself + """ + + client_id = os.environ.get( + "FUNCX_SDK_CLIENT_ID", "4cf29807-cf21-49ec-9443-ff9a3fb9f81c" + ) + return globus_sdk.NativeAppAuthClient( + client_id, app_name="Globus Compute (internal client)" + ) diff --git a/funcx_sdk/funcx/sdk/login_manager/login_flow.py b/compute_sdk/globus_compute_sdk/sdk/login_manager/login_flow.py similarity index 100% rename from funcx_sdk/funcx/sdk/login_manager/login_flow.py rename to compute_sdk/globus_compute_sdk/sdk/login_manager/login_flow.py diff --git a/funcx_sdk/funcx/sdk/login_manager/manager.py b/compute_sdk/globus_compute_sdk/sdk/login_manager/manager.py similarity index 93% rename from funcx_sdk/funcx/sdk/login_manager/manager.py rename to compute_sdk/globus_compute_sdk/sdk/login_manager/manager.py index 58136edde..3b67f75df 100644 --- a/funcx_sdk/funcx/sdk/login_manager/manager.py +++ b/compute_sdk/globus_compute_sdk/sdk/login_manager/manager.py @@ -9,7 +9,7 @@ import globus_sdk from globus_sdk.scopes import AuthScopes, ScopeBuilder -from ..web_client import FuncxWebClient +from ..web_client import WebClient from .client_login import get_client_login, is_client_login from .globus_auth import internal_auth_client from .login_flow import do_link_auth_flow @@ -25,7 +25,7 @@ def _get_funcx_all_scope() -> str: ) -class FuncxScopeBuilder(ScopeBuilder): +class ComputeScopeBuilder(ScopeBuilder): # FIXME: # for some reason, the funcx resource server name on the production scope is # "funcx_service" even though this doesn't match the resource server ID and the @@ -37,9 +37,9 @@ def __init__(self): self.all = _get_funcx_all_scope() -#: a ScopeBuilder in the style of globus_sdk.scopes for the FuncX service -#: it supports one scope named 'all', as in ``FuncxScopes.all`` -FuncxScopes = FuncxScopeBuilder() +#: a ScopeBuilder in the style of globus_sdk.scopes for the Globus Compute service +#: it supports one scope 
named 'all', as in ``ComputeScopes.all`` +ComputeScopes = ComputeScopeBuilder() class LoginManager: @@ -57,7 +57,7 @@ class LoginManager: """ SCOPES: dict[str, list[str]] = { - FuncxScopes.resource_server: [FuncxScopes.all], + ComputeScopes.resource_server: [ComputeScopes.all], AuthScopes.resource_server: [AuthScopes.openid], } @@ -184,11 +184,11 @@ def get_auth_client(self) -> globus_sdk.AuthClient: authorizer=self._get_authorizer(AuthScopes.resource_server) ) - def get_funcx_web_client( + def get_web_client( self, *, base_url: str | None = None, app_name: str | None = None - ) -> FuncxWebClient: - return FuncxWebClient( + ) -> WebClient: + return WebClient( base_url=base_url, app_name=app_name, - authorizer=self._get_authorizer(FuncxScopes.resource_server), + authorizer=self._get_authorizer(ComputeScopes.resource_server), ) diff --git a/funcx_sdk/funcx/sdk/login_manager/protocol.py b/compute_sdk/globus_compute_sdk/sdk/login_manager/protocol.py similarity index 82% rename from funcx_sdk/funcx/sdk/login_manager/protocol.py rename to compute_sdk/globus_compute_sdk/sdk/login_manager/protocol.py index 2c5547b3d..e00c721f5 100644 --- a/funcx_sdk/funcx/sdk/login_manager/protocol.py +++ b/compute_sdk/globus_compute_sdk/sdk/login_manager/protocol.py @@ -4,7 +4,7 @@ import globus_sdk -from ..web_client import FuncxWebClient +from ..web_client import WebClient # these were added to stdlib typing in 3.8, so the import must be conditional # mypy and other tools expect and document a sys.version_info check @@ -25,5 +25,5 @@ def logout(self) -> bool: def get_auth_client(self) -> globus_sdk.AuthClient: ... - def get_funcx_web_client(self, *, base_url: str | None = None) -> FuncxWebClient: + def get_web_client(self, *, base_url: str | None = None) -> WebClient: ... diff --git a/funcx_sdk/funcx/sdk/login_manager/tokenstore.py b/compute_sdk/globus_compute_sdk/sdk/login_manager/tokenstore.py similarity index 100% rename from funcx_sdk/funcx/sdk/login_manager/tokenstore.py rename to compute_sdk/globus_compute_sdk/sdk/login_manager/tokenstore.py diff --git a/funcx_sdk/funcx/sdk/login_manager/whoami.py b/compute_sdk/globus_compute_sdk/sdk/login_manager/whoami.py similarity index 94% rename from funcx_sdk/funcx/sdk/login_manager/whoami.py rename to compute_sdk/globus_compute_sdk/sdk/login_manager/whoami.py index 331884148..bff47306e 100644 --- a/funcx_sdk/funcx/sdk/login_manager/whoami.py +++ b/compute_sdk/globus_compute_sdk/sdk/login_manager/whoami.py @@ -1,10 +1,9 @@ import logging +from globus_compute_sdk.sdk.login_manager import LoginManager +from globus_compute_sdk.sdk.utils.printing import print_table from globus_sdk import AuthAPIError -from funcx.sdk.login_manager import LoginManager -from funcx.sdk.utils.printing import print_table - NOT_LOGGED_IN_MSG = "Unable to retrieve user information. Please log in again." 
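The net effect of the rename from ``get_funcx_web_client`` to ``get_web_client``, sketched under the assumption that an interactive login flow is acceptable::

    from globus_compute_sdk.sdk.login_manager import LoginManager

    lm = LoginManager()
    lm.ensure_logged_in()             # may prompt for a Globus login
    web_client = lm.get_web_client()  # formerly lm.get_funcx_web_client()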
logger = logging.getLogger(__name__) diff --git a/funcx_sdk/funcx/sdk/utils/__init__.py b/compute_sdk/globus_compute_sdk/sdk/utils/__init__.py similarity index 100% rename from funcx_sdk/funcx/sdk/utils/__init__.py rename to compute_sdk/globus_compute_sdk/sdk/utils/__init__.py diff --git a/funcx_sdk/funcx/sdk/utils/printing.py b/compute_sdk/globus_compute_sdk/sdk/utils/printing.py similarity index 100% rename from funcx_sdk/funcx/sdk/utils/printing.py rename to compute_sdk/globus_compute_sdk/sdk/utils/printing.py diff --git a/funcx_sdk/funcx/sdk/web_client.py b/compute_sdk/globus_compute_sdk/sdk/web_client.py similarity index 91% rename from funcx_sdk/funcx/sdk/web_client.py rename to compute_sdk/globus_compute_sdk/sdk/web_client.py index 77fa038d2..1aa11dd1a 100644 --- a/funcx_sdk/funcx/sdk/web_client.py +++ b/compute_sdk/globus_compute_sdk/sdk/web_client.py @@ -1,6 +1,6 @@ """ -This module implements a Globus SDK client class suitable for use with the FuncX web -service. +This module implements a Globus SDK client class suitable for use with the +Globus Compute web service. It also implements data helpers for building complex payloads. Most notably, `FunctionRegistrationData` which can be constructed from an arbitrary callable. @@ -10,20 +10,19 @@ import uuid import globus_sdk -from funcx_common.sdk_version_sharing import user_agent_substring +from globus_compute_common.sdk_version_sharing import user_agent_substring +from globus_compute_sdk.sdk._environments import get_web_service_url +from globus_compute_sdk.serialize import ComputeSerializer +from globus_compute_sdk.version import __version__ from globus_sdk.exc.api import GlobusAPIError -from funcx.sdk._environments import get_web_service_url -from funcx.serialize import FuncXSerializer -from funcx.version import __version__ - ID_PARAM_T = t.Union[uuid.UUID, str] def _get_packed_code( - func: t.Callable, serializer: t.Optional[FuncXSerializer] = None + func: t.Callable, serializer: t.Optional[ComputeSerializer] = None ) -> str: - serializer = serializer if serializer else FuncXSerializer() + serializer = serializer if serializer else ComputeSerializer() return serializer.pack_buffers([serializer.serialize(func)]) @@ -39,7 +38,7 @@ def __init__( description: t.Optional[str] = None, public: bool = False, group: t.Optional[str] = None, - serializer: t.Optional[FuncXSerializer] = None, + serializer: t.Optional[ComputeSerializer] = None, ): if function is not None: function_name = function.__name__ @@ -76,13 +75,13 @@ def __str__(self): return "FunctionRegistrationData(" + json.dumps(self.to_dict()) + ")" -class FuncxWebClient(globus_sdk.BaseClient): +class WebClient(globus_sdk.BaseClient): # the `service_name` is used in the Globus SDK to lookup the service URL from - # config. However, FuncX has its own logic for determining the base URL. + # config. However, Globus Compute has its own logic for determining the base URL. # set `service_name` to allow the check which ensures this is set to pass # it does not have any other effects service_name: str = "funcx" - # use the FuncX-specific error class + # use the Globus Compute-specific error class error_class = GlobusAPIError def __init__( @@ -123,7 +122,7 @@ def get_batch_status( data.update(additional_fields) return self.post("/batch_status", data=data) - # the FuncXClient needs to send version information through BaseClient.app_name, + # the Client needs to send version information through BaseClient.app_name, # so that's overridden here to prevent direct manipulation. 
use user_app_name # instead to send any custom metadata through the User Agent request header @property diff --git a/compute_sdk/globus_compute_sdk/serialize/__init__.py b/compute_sdk/globus_compute_sdk/serialize/__init__.py new file mode 100644 index 000000000..986fa124b --- /dev/null +++ b/compute_sdk/globus_compute_sdk/serialize/__init__.py @@ -0,0 +1,3 @@ +from globus_compute_sdk.serialize.facade import ComputeSerializer + +__all__ = ("ComputeSerializer",) diff --git a/funcx_sdk/funcx/serialize/base.py b/compute_sdk/globus_compute_sdk/serialize/base.py similarity index 100% rename from funcx_sdk/funcx/serialize/base.py rename to compute_sdk/globus_compute_sdk/serialize/base.py diff --git a/funcx_sdk/funcx/serialize/concretes.py b/compute_sdk/globus_compute_sdk/serialize/concretes.py similarity index 98% rename from funcx_sdk/funcx/serialize/concretes.py rename to compute_sdk/globus_compute_sdk/serialize/concretes.py index e2580938c..8e33edd23 100644 --- a/funcx_sdk/funcx/serialize/concretes.py +++ b/compute_sdk/globus_compute_sdk/serialize/concretes.py @@ -7,8 +7,7 @@ from collections import OrderedDict import dill - -from funcx.serialize.base import DeserializationError, SerializeBase +from globus_compute_sdk.serialize.base import DeserializationError, SerializeBase logger = logging.getLogger(__name__) diff --git a/funcx_sdk/funcx/serialize/facade.py b/compute_sdk/globus_compute_sdk/serialize/facade.py similarity index 97% rename from funcx_sdk/funcx/serialize/facade.py rename to compute_sdk/globus_compute_sdk/serialize/facade.py index acdaf5a5d..a0af4dba9 100644 --- a/funcx_sdk/funcx/serialize/facade.py +++ b/compute_sdk/globus_compute_sdk/serialize/facade.py @@ -1,11 +1,11 @@ import logging -from funcx.serialize.concretes import METHODS_MAP_CODE, METHODS_MAP_DATA +from globus_compute_sdk.serialize.concretes import METHODS_MAP_CODE, METHODS_MAP_DATA logger = logging.getLogger(__name__) -class FuncXSerializer: +class ComputeSerializer: """Wraps several serializers for one uniform interface""" def __init__(self): diff --git a/funcx_sdk/funcx/utils.py b/compute_sdk/globus_compute_sdk/utils.py similarity index 100% rename from funcx_sdk/funcx/utils.py rename to compute_sdk/globus_compute_sdk/utils.py diff --git a/funcx_sdk/funcx/version.py b/compute_sdk/globus_compute_sdk/version.py similarity index 81% rename from funcx_sdk/funcx/version.py rename to compute_sdk/globus_compute_sdk/version.py index e439ad1b8..79686c6ef 100644 --- a/funcx_sdk/funcx/version.py +++ b/compute_sdk/globus_compute_sdk/version.py @@ -1,14 +1,13 @@ +from globus_compute_sdk.errors import VersionMismatch from packaging.version import Version -from funcx.errors import VersionMismatch - # single source of truth for package version, # see https://packaging.python.org/en/latest/single_source_version/ -__version__ = "1.0.13" +__version__ = "2.0.0" def compare_versions( - current: str, min_version: str, *, package_name: str = "funcx" + current: str, min_version: str, *, package_name: str = "globus-compute-sdk" ) -> None: current_v = Version(current) min_v = Version(min_version) diff --git a/funcx_sdk/parsl/app/errors.py b/compute_sdk/parsl/app/errors.py similarity index 94% rename from funcx_sdk/parsl/app/errors.py rename to compute_sdk/parsl/app/errors.py index eea085391..11c670bf4 100644 --- a/funcx_sdk/parsl/app/errors.py +++ b/compute_sdk/parsl/app/errors.py @@ -42,7 +42,7 @@ def wrap_error( def wrapper(*args: object, **kwargs: object) -> Any: import sys - from funcx.serialize.errors import RemoteExceptionWrapper + 
from globus_compute_sdk.serialize.errors import RemoteExceptionWrapper try: return func(*args, **kwargs) # type: ignore diff --git a/funcx_sdk/setup.cfg b/compute_sdk/setup.cfg similarity index 78% rename from funcx_sdk/setup.cfg rename to compute_sdk/setup.cfg index f1d2cf498..645a5f071 100644 --- a/funcx_sdk/setup.cfg +++ b/compute_sdk/setup.cfg @@ -1,6 +1,6 @@ [isort] profile = black -known_first_party = funcx,funcx_endpoint +known_first_party = globus-compute-sdk, globus-compute-endpoint [flake8] # config to be black-compatible diff --git a/funcx_sdk/setup.py b/compute_sdk/setup.py similarity index 82% rename from funcx_sdk/setup.py rename to compute_sdk/setup.py index 66780766e..804b94558 100644 --- a/funcx_sdk/setup.py +++ b/compute_sdk/setup.py @@ -7,6 +7,7 @@ # request sending and authorization tools "requests>=2.20.0", "globus-sdk>=3.14.0,<4", + "globus-compute-common==0.1.0", # 'websockets' is used for the client-side websocket listener "websockets==10.3", # dill is an extension of `pickle` to a wider array of native python types @@ -19,7 +20,6 @@ # set a version floor but no ceiling as the library offers a stable API under CalVer "packaging>=21.1", "pika>=1.2", - "funcx-common==0.0.25", "tblib==1.7.0", "texttable>=1.6.7", ] @@ -45,7 +45,7 @@ def parse_version(): # single source of truth for package version version_string = "" version_pattern = re.compile(r'__version__ = "([^"]*)"') - with open(os.path.join("funcx", "version.py")) as f: + with open(os.path.join("globus_compute_sdk", "version.py")) as f: for line in f: match = version_pattern.match(line) if match: @@ -57,10 +57,12 @@ def parse_version(): setup( - name="funcx", + name="globus-compute-sdk", version=parse_version(), - packages=find_namespace_packages(include=["funcx", "funcx.*"]), - description="funcX: High Performance Function Serving for Science", + packages=find_namespace_packages( + include=["globus_compute_sdk", "globus_compute_sdk.*"] + ), + description="Globus Compute: High Performance Function Serving for Science", install_requires=REQUIRES, extras_require={ "dev": DEV_REQUIRES, @@ -77,9 +79,9 @@ def parse_version(): "Programming Language :: Python :: 3", "Topic :: Scientific/Engineering", ], - keywords=["funcX", "FaaS", "Function Serving"], - author="funcX team", - author_email="labs@globus.org", + keywords=["Globus Compute", "FaaS", "Function Serving"], + author="Globus Compute Team", + author_email="support@globus.org", license="Apache License, Version 2.0", url="https://github.com/funcx-faas/funcx", ) diff --git a/funcx_sdk/tests/__init__.py b/compute_sdk/tests/__init__.py similarity index 100% rename from funcx_sdk/tests/__init__.py rename to compute_sdk/tests/__init__.py diff --git a/funcx_sdk/tests/conftest.py b/compute_sdk/tests/conftest.py similarity index 80% rename from funcx_sdk/tests/conftest.py rename to compute_sdk/tests/conftest.py index eabb0c978..923eae5ba 100644 --- a/funcx_sdk/tests/conftest.py +++ b/compute_sdk/tests/conftest.py @@ -1,6 +1,5 @@ import pytest - -from funcx import FuncXClient +from globus_compute_sdk import Client config = { "funcx_service_address": "https://api2.funcx.org/v2", @@ -11,7 +10,7 @@ @pytest.fixture(autouse=True, scope="session") def load_funcx_session(request, pytestconfig): - """Load funcX sdk client for the entire test suite, + """Load Globus Compute sdk client for the entire test suite, The special path `local` indicates that configuration will not come from a pytest managed configuration file; in that case, see @@ -38,7 +37,7 @@ def 
pytest_addoption(parser): metavar="service-address", nargs=1, default=[config["funcx_service_address"]], - help="Specify a funcX service address", + help="Specify a Globus Compute service address", ) parser.addoption( @@ -52,22 +51,22 @@ def pytest_addoption(parser): @pytest.fixture -def fxc_args(pytestconfig): - fxc_args = { +def compute_client_args(pytestconfig): + gcc_args = { "funcx_service_address": pytestconfig.getoption("--service-address")[0], "results_ws_uri": pytestconfig.getoption("--ws-uri")[0], } - return fxc_args + return gcc_args @pytest.fixture -def fxc(fxc_args): - fxc = FuncXClient(**fxc_args) - return fxc +def compute_client(compute_client_args): + gcc = Client(**compute_client_args) + return gcc @pytest.fixture def login_manager(mocker): mock_login_manager = mocker.Mock() - mock_login_manager.get_funcx_web_client = mocker.Mock + mock_login_manager.get_web_client = mocker.Mock return mock_login_manager diff --git a/funcx_sdk/tests/integration/test_executor_int.py b/compute_sdk/tests/integration/test_executor_int.py similarity index 58% rename from funcx_sdk/tests/integration/test_executor_int.py rename to compute_sdk/tests/integration/test_executor_int.py index 66acf87d2..ce0a21893 100644 --- a/funcx_sdk/tests/integration/test_executor_int.py +++ b/compute_sdk/tests/integration/test_executor_int.py @@ -5,20 +5,19 @@ from unittest import mock import pytest +from globus_compute_sdk import Client +from globus_compute_sdk.sdk.executor import Executor, _ResultWatcher from tests.utils import try_assert -from funcx import FuncXClient -from funcx.sdk.executor import FuncXExecutor, _ResultWatcher - @pytest.mark.skipif( - not os.getenv("FUNCX_INTEGRATION_TEST_WEB_URL"), reason="no integration web url" + not os.getenv("COMPUTE_INTEGRATION_TEST_WEB_URL"), reason="no integration web url" ) def test_resultwatcher_graceful_shutdown(): - service_url = os.environ["FUNCX_INTEGRATION_TEST_WEB_URL"] - fxc = FuncXClient(funcx_service_address=service_url) - fxe = FuncXExecutor(funcx_client=fxc) - rw = _ResultWatcher(fxe) + service_url = os.environ["COMPUTE_INTEGRATION_TEST_WEB_URL"] + gcc = Client(funcx_service_address=service_url) + gce = Executor(funcx_client=gcc) + rw = _ResultWatcher(gce) rw._start_consuming = mock.Mock() rw.start() @@ -28,7 +27,7 @@ def test_resultwatcher_graceful_shutdown(): try_assert(lambda: rw._channel is None) try_assert(lambda: not rw._connection or rw._connection.is_closed) try_assert(lambda: not rw.is_alive()) - fxe.shutdown() + gce.shutdown() def test_executor_atexit_handler_catches_all_instances(tmp_path): @@ -36,21 +35,21 @@ def test_executor_atexit_handler_catches_all_instances(tmp_path): script_content = textwrap.dedent( """ import random - from funcx import FuncXExecutor - from funcx.sdk.executor import _REGISTERED_FXEXECUTORS + from globus_compute_sdk import Executor + from globus_compute_sdk.sdk.executor import _REGISTERED_FXEXECUTORS - fxc = " a fake funcx_client" + gcc = " a fake funcx_client" num_executors = random.randrange(1, 10) for i in range(num_executors): - FuncXExecutor(funcx_client=fxc) # start N threads, none shutdown - fxe = FuncXExecutor(funcx_client=fxc) # intentionally overwritten - fxe = FuncXExecutor(funcx_client=fxc) + Executor(funcx_client=gcc) # start N threads, none shutdown + gce = Executor(funcx_client=gcc) # intentionally overwritten + gce = Executor(funcx_client=gcc) num_executors += 2 assert len(_REGISTERED_FXEXECUTORS) == num_executors, ( f"Verify test setup: {len(_REGISTERED_FXEXECUTORS)} != {num_executors}" ) - 
fxe.shutdown() # only shutting down _last_ instance. Should still exit cleanly + gce.shutdown() # only shutting down _last_ instance. Should still exit cleanly """ ) test_script.write_text(script_content) diff --git a/funcx_sdk/tests/integration/test_serialization.py b/compute_sdk/tests/integration/test_serialization.py similarity index 97% rename from funcx_sdk/tests/integration/test_serialization.py rename to compute_sdk/tests/integration/test_serialization.py index 56c0ad359..c0497378a 100644 --- a/funcx_sdk/tests/integration/test_serialization.py +++ b/compute_sdk/tests/integration/test_serialization.py @@ -1,10 +1,9 @@ import inspect import sys +import globus_compute_sdk.serialize.concretes as concretes import pytest -import funcx.serialize.concretes as concretes - def foo(x, y=3): return x * y @@ -234,10 +233,10 @@ def test_code_dill_source(): def test_overall(): - from funcx.serialize.facade import FuncXSerializer + from globus_compute_sdk.serialize.facade import ComputeSerializer - check_serialize_deserialize_foo(FuncXSerializer()) - check_serialize_deserialize_bar(FuncXSerializer()) + check_serialize_deserialize_foo(ComputeSerializer()) + check_serialize_deserialize_bar(ComputeSerializer()) def test_serialize_deserialize_combined(): diff --git a/funcx_sdk/tests/integration/test_web_client_exceptions.py b/compute_sdk/tests/integration/test_web_client_exceptions.py similarity index 87% rename from funcx_sdk/tests/integration/test_web_client_exceptions.py rename to compute_sdk/tests/integration/test_web_client_exceptions.py index be07d85a4..9829d41d4 100644 --- a/funcx_sdk/tests/integration/test_web_client_exceptions.py +++ b/compute_sdk/tests/integration/test_web_client_exceptions.py @@ -1,9 +1,8 @@ import pytest import responses +from globus_compute_sdk.sdk.web_client import WebClient from globus_sdk.exc.api import GlobusAPIError -from funcx.sdk.web_client import FuncxWebClient - @pytest.fixture(autouse=True) def mocked_responses(): @@ -22,9 +21,7 @@ def mocked_responses(): @pytest.fixture def client(): # for the default test client, set a fake URL and disable retries - return FuncxWebClient( - base_url="https://api.funcx", transport_params={"max_retries": 0} - ) + return WebClient(base_url="https://api.funcx", transport_params={"max_retries": 0}) @pytest.mark.parametrize("http_status", [400, 500]) diff --git a/funcx_sdk/tests/unit/conftest.py b/compute_sdk/tests/unit/conftest.py similarity index 100% rename from funcx_sdk/tests/unit/conftest.py rename to compute_sdk/tests/unit/conftest.py diff --git a/funcx_sdk/tests/unit/test_atomiccontroller.py b/compute_sdk/tests/unit/test_atomiccontroller.py similarity index 96% rename from funcx_sdk/tests/unit/test_atomiccontroller.py rename to compute_sdk/tests/unit/test_atomiccontroller.py index cf8d93085..c4ab2e824 100644 --- a/funcx_sdk/tests/unit/test_atomiccontroller.py +++ b/compute_sdk/tests/unit/test_atomiccontroller.py @@ -1,6 +1,6 @@ import random -from funcx.sdk.executor import AtomicController +from globus_compute_sdk.sdk.executor import AtomicController class DummyTarget: diff --git a/funcx_sdk/tests/unit/test_client.py b/compute_sdk/tests/unit/test_client.py similarity index 66% rename from funcx_sdk/tests/unit/test_client.py rename to compute_sdk/tests/unit/test_client.py index b3a0710a7..8dcfc24bc 100644 --- a/funcx_sdk/tests/unit/test_client.py +++ b/compute_sdk/tests/unit/test_client.py @@ -1,12 +1,11 @@ import uuid from unittest import mock +import globus_compute_sdk as gc import pytest - -import funcx -from funcx import 
ContainerSpec -from funcx.errors import FuncxTaskExecutionFailed -from funcx.serialize import FuncXSerializer +from globus_compute_sdk import ContainerSpec +from globus_compute_sdk.errors import TaskExecutionFailed +from globus_compute_sdk.serialize import ComputeSerializer @pytest.fixture(autouse=True) @@ -46,10 +45,10 @@ def test_client_init_sets_addresses_by_env( # create the client, either with just the input env or with explicit parameters # for explicit params, alter the expected URI(s) if not explicit_params: - client = funcx.FuncXClient(**kwargs) + client = gc.Client(**kwargs) elif explicit_params == "web": web_uri = f"http://{randomstring()}.fqdn:1234/{randomstring()}" - client = funcx.FuncXClient(funcx_service_address=web_uri, **kwargs) + client = gc.Client(funcx_service_address=web_uri, **kwargs) else: raise NotImplementedError @@ -59,12 +58,12 @@ def test_client_init_sets_addresses_by_env( def test_client_init_accepts_specified_taskgroup(): tg_uuid = uuid.uuid4() - fxc = funcx.FuncXClient( + gcc = gc.Client( task_group_id=tg_uuid, do_version_check=False, login_manager=mock.Mock(), ) - assert fxc.session_task_group_id == str(tg_uuid) + assert gcc.session_task_group_id == str(tg_uuid) @pytest.mark.parametrize( @@ -76,31 +75,31 @@ def test_client_init_accepts_specified_taskgroup(): ], ) def test_update_task_table_on_invalid_data(api_data): - fxc = funcx.FuncXClient(do_version_check=False, login_manager=mock.Mock()) + gcc = gc.Client(do_version_check=False, login_manager=mock.Mock()) with pytest.raises(ValueError): - fxc._update_task_table(api_data, "task-id-foo") + gcc._update_task_table(api_data, "task-id-foo") def test_update_task_table_on_exception(): api_data = {"status": "success", "exception": "foo-bar-baz", "completion_t": "1.1"} - fxc = funcx.FuncXClient(do_version_check=False, login_manager=mock.Mock()) + gcc = gc.Client(do_version_check=False, login_manager=mock.Mock()) - with pytest.raises(FuncxTaskExecutionFailed) as excinfo: - fxc._update_task_table(api_data, "task-id-foo") + with pytest.raises(TaskExecutionFailed) as excinfo: + gcc._update_task_table(api_data, "task-id-foo") assert "foo-bar-baz" in str(excinfo.value) def test_update_task_table_simple_object(randomstring): - serde = FuncXSerializer() - fxc = funcx.FuncXClient(do_version_check=False, login_manager=mock.Mock()) + serde = ComputeSerializer() + gcc = gc.Client(do_version_check=False, login_manager=mock.Mock()) task_id = "some_task_id" payload = randomstring() data = {"status": "success", "completion_t": "1.1"} data["result"] = serde.serialize(payload) - st = fxc._update_task_table(data, task_id) + st = gcc._update_task_table(data, task_id) assert not st["pending"] assert st["result"] == payload assert "exception" not in st @@ -111,17 +110,17 @@ def test_pending_tasks_always_fetched(): should_fetch_02 = str(uuid.uuid4()) no_fetch = str(uuid.uuid4()) - fxc = funcx.FuncXClient(do_version_check=False, login_manager=mock.Mock()) - fxc.web_client = mock.MagicMock() - fxc._task_status_table.update( + gcc = gc.Client(do_version_check=False, login_manager=mock.Mock()) + gcc.web_client = mock.MagicMock() + gcc._task_status_table.update( {should_fetch_01: {"pending": True}, no_fetch: {"pending": False}} ) task_id_list = [no_fetch, should_fetch_01, should_fetch_02] # bulk avenue - fxc.get_batch_result(task_id_list) + gcc.get_batch_result(task_id_list) - args, _ = fxc.web_client.get_batch_status.call_args + args, _ = gcc.web_client.get_batch_status.call_args assert should_fetch_01 in args[0] assert should_fetch_02 in 
args[0] assert no_fetch not in args[0] @@ -132,11 +131,11 @@ def test_pending_tasks_always_fetched(): (True, should_fetch_02), (False, no_fetch), ): - fxc.web_client.get_task.reset_mock() - fxc.get_task(sf) - assert should_fetch is fxc.web_client.get_task.called + gcc.web_client.get_task.reset_mock() + gcc.get_task(sf) + assert should_fetch is gcc.web_client.get_task.called if should_fetch: - args, _ = fxc.web_client.get_task.call_args + args, _ = gcc.web_client.get_task.call_args assert sf == args[0] @@ -145,20 +144,20 @@ def test_batch_created_websocket_queue(create_ws_queue): eid = str(uuid.uuid4()) fid = str(uuid.uuid4()) - fxc = funcx.FuncXClient(do_version_check=False, login_manager=mock.Mock()) - fxc.web_client = mock.MagicMock() + gcc = gc.Client(do_version_check=False, login_manager=mock.Mock()) + gcc.web_client = mock.MagicMock() if create_ws_queue is None: - batch = fxc.create_batch() + batch = gcc.create_batch() else: - batch = fxc.create_batch(create_websocket_queue=create_ws_queue) + batch = gcc.create_batch(create_websocket_queue=create_ws_queue) batch.add(fid, eid, (1,)) batch.add(fid, eid, (2,)) - fxc.batch_run(batch) + gcc.batch_run(batch) - assert fxc.web_client.submit.called - submit_data = fxc.web_client.submit.call_args[0][0] + assert gcc.web_client.submit.called + submit_data = gcc.web_client.submit.call_args[0][0] assert "create_websocket_queue" in submit_data if create_ws_queue: assert submit_data["create_websocket_queue"] is True @@ -167,8 +166,8 @@ def test_batch_created_websocket_queue(create_ws_queue): def test_batch_error(): - fxc = funcx.FuncXClient(do_version_check=False, login_manager=mock.Mock()) - fxc.web_client = mock.MagicMock() + gcc = gc.Client(do_version_check=False, login_manager=mock.Mock()) + gcc.web_client = mock.MagicMock() error_reason = "reason 1 2 3" error_results = { @@ -188,20 +187,20 @@ def test_batch_error(): ], "task_group_id": "tg_id", } - fxc.web_client.submit = mock.MagicMock(return_value=error_results) + gcc.web_client.submit = mock.MagicMock(return_value=error_results) - batch = fxc.create_batch() + batch = gcc.create_batch() batch.add("fid1", "eid1", "arg1") batch.add("fid2", "eid2", "arg2") - with pytest.raises(FuncxTaskExecutionFailed) as excinfo: - fxc.batch_run(batch) + with pytest.raises(TaskExecutionFailed) as excinfo: + gcc.batch_run(batch) assert error_reason in str(excinfo) def test_batch_no_reason(): - fxc = funcx.FuncXClient(do_version_check=False, login_manager=mock.Mock()) - fxc.web_client = mock.MagicMock() + gcc = gc.Client(do_version_check=False, login_manager=mock.Mock()) + gcc.web_client = mock.MagicMock() error_results = { "response": "batch", @@ -214,10 +213,10 @@ def test_batch_no_reason(): ], "task_group_id": "tg_id", } - fxc.web_client.submit = mock.MagicMock(return_value=error_results) + gcc.web_client.submit = mock.MagicMock(return_value=error_results) - with pytest.raises(FuncxTaskExecutionFailed) as excinfo: - fxc.run(endpoint_id="fid", function_id="fid") + with pytest.raises(TaskExecutionFailed) as excinfo: + gcc.run(endpoint_id="fid", function_id="fid") assert "Unknown execution failure" in str(excinfo) @@ -225,13 +224,13 @@ def test_batch_no_reason(): @pytest.mark.parametrize("asynchronous", [True, False, None]) def test_single_run_websocket_queue_depend_async(asynchronous): if asynchronous is None: - fxc = funcx.FuncXClient(do_version_check=False, login_manager=mock.Mock()) + gcc = gc.Client(do_version_check=False, login_manager=mock.Mock()) else: - fxc = funcx.FuncXClient( + gcc = gc.Client( 
asynchronous=asynchronous, do_version_check=False, login_manager=mock.Mock() ) - fxc.web_client = mock.MagicMock() + gcc.web_client = mock.MagicMock() fake_results = { "results": [ @@ -242,11 +241,11 @@ def test_single_run_websocket_queue_depend_async(asynchronous): ], "task_group_id": str(uuid.uuid4()), } - fxc.web_client.submit = mock.MagicMock(return_value=fake_results) - fxc.run(endpoint_id=str(uuid.uuid4()), function_id=str(uuid.uuid4())) + gcc.web_client.submit = mock.MagicMock(return_value=fake_results) + gcc.run(endpoint_id=str(uuid.uuid4()), function_id=str(uuid.uuid4())) - assert fxc.web_client.submit.called - submit_data = fxc.web_client.submit.call_args[0][0] + assert gcc.web_client.submit.called + submit_data = gcc.web_client.submit.call_args[0][0] assert "create_websocket_queue" in submit_data if asynchronous: assert submit_data["create_websocket_queue"] is True @@ -257,8 +256,8 @@ def test_single_run_websocket_queue_depend_async(asynchronous): def test_build_container(mocker, login_manager): mock_data = mocker.Mock() mock_data.data = {"container_id": "123-456"} - login_manager.get_funcx_web_client.post = mocker.Mock(return_value=mock_data) - fxc = funcx.FuncXClient(do_version_check=False, login_manager=login_manager) + login_manager.get_web_client.post = mocker.Mock(return_value=mock_data) + gcc = gc.Client(do_version_check=False, login_manager=login_manager) spec = ContainerSpec( name="MyContainer", pip=[ @@ -269,10 +268,10 @@ def test_build_container(mocker, login_manager): payload_url="https://github.com/funcx-faas/funcx-container-service.git", ) - container_id = fxc.build_container(spec) + container_id = gcc.build_container(spec) assert container_id == "123-456" - login_manager.get_funcx_web_client.post.assert_called() - calls = login_manager.get_funcx_web_client.post.call_args + login_manager.get_web_client.post.assert_called() + calls = login_manager.get_web_client.post.call_args assert calls[0][0] == "containers/build" assert calls[1] == {"data": spec.to_json()} @@ -285,9 +284,9 @@ def __init__(self): self["status"] = expected_status self.http_status = 200 - login_manager.get_funcx_web_client.get = mocker.Mock(return_value=MockData()) - fxc = funcx.FuncXClient(do_version_check=False, login_manager=login_manager) - status = fxc.get_container_build_status("123-434") + login_manager.get_web_client.get = mocker.Mock(return_value=MockData()) + gcc = gc.Client(do_version_check=False, login_manager=login_manager) + status = gcc.get_container_build_status("123-434") assert status == expected_status @@ -296,11 +295,11 @@ class MockData(dict): def __init__(self): self.http_status = 404 - login_manager.get_funcx_web_client.get = mocker.Mock(return_value=MockData()) - fxc = funcx.FuncXClient(do_version_check=False, login_manager=login_manager) + login_manager.get_web_client.get = mocker.Mock(return_value=MockData()) + gcc = gc.Client(do_version_check=False, login_manager=login_manager) with pytest.raises(ValueError) as excinfo: - fxc.get_container_build_status("123-434") + gcc.get_container_build_status("123-434") assert excinfo.value.args[0] == "Container ID 123-434 not found" @@ -311,10 +310,10 @@ def __init__(self): self.http_status = 500 self.http_reason = "This is a reason" - login_manager.get_funcx_web_client.get = mocker.Mock(return_value=MockData()) - fxc = funcx.FuncXClient(do_version_check=False, login_manager=login_manager) + login_manager.get_web_client.get = mocker.Mock(return_value=MockData()) + gcc = gc.Client(do_version_check=False, login_manager=login_manager) 
with pytest.raises(SystemError) as excinfo: - fxc.get_container_build_status("123-434") + gcc.get_container_build_status("123-434") assert type(excinfo.value) == SystemError diff --git a/funcx_sdk/tests/unit/test_container_spec.py b/compute_sdk/tests/unit/test_container_spec.py similarity index 93% rename from funcx_sdk/tests/unit/test_container_spec.py rename to compute_sdk/tests/unit/test_container_spec.py index 4ca4a0127..7d17023e5 100644 --- a/funcx_sdk/tests/unit/test_container_spec.py +++ b/compute_sdk/tests/unit/test_container_spec.py @@ -1,6 +1,5 @@ import pytest as pytest - -from funcx.sdk.container_spec import ContainerSpec +from globus_compute_sdk.sdk.container_spec import ContainerSpec def test_constructor(): diff --git a/funcx_sdk/tests/unit/test_environment_lookups.py b/compute_sdk/tests/unit/test_environment_lookups.py similarity index 98% rename from funcx_sdk/tests/unit/test_environment_lookups.py rename to compute_sdk/tests/unit/test_environment_lookups.py index dc388de08..ae091dc9b 100644 --- a/funcx_sdk/tests/unit/test_environment_lookups.py +++ b/compute_sdk/tests/unit/test_environment_lookups.py @@ -1,6 +1,5 @@ import pytest - -from funcx.sdk._environments import ( +from globus_compute_sdk.sdk._environments import ( get_web_service_url, get_web_socket_url, urls_might_mismatch, diff --git a/funcx_sdk/tests/unit/test_executor.py b/compute_sdk/tests/unit/test_executor.py similarity index 63% rename from funcx_sdk/tests/unit/test_executor.py rename to compute_sdk/tests/unit/test_executor.py index 0c9e3b460..ae12f5074 100644 --- a/funcx_sdk/tests/unit/test_executor.py +++ b/compute_sdk/tests/unit/test_executor.py @@ -8,16 +8,15 @@ import pika import pytest -from funcx_common import messagepack -from funcx_common.messagepack.message_types import Result, ResultErrorDetails +from globus_compute_common import messagepack +from globus_compute_common.messagepack.message_types import Result, ResultErrorDetails +from globus_compute_sdk import Client, Executor +from globus_compute_sdk.errors import TaskExecutionFailed +from globus_compute_sdk.sdk.asynchronous.compute_future import ComputeFuture +from globus_compute_sdk.sdk.executor import TaskSubmissionInfo, _ResultWatcher +from globus_compute_sdk.serialize.facade import ComputeSerializer from tests.utils import try_assert, try_for_timeout -from funcx import FuncXClient, FuncXExecutor -from funcx.errors import FuncxTaskExecutionFailed -from funcx.sdk.asynchronous.funcx_future import FuncXFuture -from funcx.sdk.executor import TaskSubmissionInfo, _ResultWatcher -from funcx.serialize.facade import FuncXSerializer - def _is_stopped(thread: threading.Thread | None) -> bool: def _wrapped(): @@ -30,9 +29,9 @@ def noop(): return 1 -class MockedFuncXExecutor(FuncXExecutor): +class MockedExecutor(Executor): def __init__(self, *args, **kwargs): - kwargs.update({"funcx_client": mock.Mock(spec=FuncXClient)}) + kwargs.update({"funcx_client": mock.Mock(spec=Client)}) super().__init__(*args, **kwargs) self._time_to_stop_mock = threading.Event() self._task_submitter_exception: t.Type[Exception] | None = None @@ -69,32 +68,34 @@ def join(self, timeout: float | None = None) -> None: @pytest.fixture -def fxexecutor(mocker): - fxc = mock.MagicMock() - fxc.session_task_group_id = str(uuid.uuid4()) - fxe = FuncXExecutor(funcx_client=fxc) - watcher = mocker.patch("funcx.sdk.executor._ResultWatcher", autospec=True) +def gc_executor(mocker): + gcc = mock.MagicMock() + gcc.session_task_group_id = str(uuid.uuid4()) + gce = Executor(funcx_client=gcc) + 
watcher = mocker.patch( + "globus_compute_sdk.sdk.executor._ResultWatcher", autospec=True + ) def create_mock_watcher(*args, **kwargs): - return MockedResultWatcher(fxe) + return MockedResultWatcher(gce) watcher.side_effect = create_mock_watcher - yield fxc, fxe + yield gcc, gce - fxe.shutdown(wait=False, cancel_futures=True) - try_for_timeout(_is_stopped(fxe._task_submitter)) - try_for_timeout(_is_stopped(fxe._result_watcher)) + gce.shutdown(wait=False, cancel_futures=True) + try_for_timeout(_is_stopped(gce._task_submitter)) + try_for_timeout(_is_stopped(gce._result_watcher)) - if not _is_stopped(fxe._task_submitter)(): - trepr = repr(fxe._task_submitter) + if not _is_stopped(gce._task_submitter)(): + trepr = repr(gce._task_submitter) raise RuntimeError( - "FuncXExecutor still running: _task_submitter thread alive: %s" % trepr + "Executor still running: _task_submitter thread alive: %s" % trepr ) - if not _is_stopped(fxe._result_watcher)(): - trepr = repr(fxe._result_watcher) + if not _is_stopped(gce._result_watcher)(): + trepr = repr(gce._result_watcher) raise RuntimeError( - "FuncXExecutor still running: _result_watcher thread alive: %r" % trepr + "Executor still running: _result_watcher thread alive: %r" % trepr ) @@ -116,19 +117,19 @@ def test_task_submission_info_stringification(): @pytest.mark.parametrize("argname", ("batch_interval", "batch_enabled")) def test_deprecated_args_warned(argname, mocker): - mock_warn = mocker.patch("funcx.sdk.executor.warnings") - fxc = mock.Mock(spec=FuncXClient) - FuncXExecutor(funcx_client=fxc).shutdown() + mock_warn = mocker.patch("globus_compute_sdk.sdk.executor.warnings") + gcc = mock.Mock(spec=Client) + Executor(funcx_client=gcc).shutdown() mock_warn.warn.assert_not_called() - FuncXExecutor(funcx_client=fxc, **{argname: 1}).shutdown() + Executor(funcx_client=gcc, **{argname: 1}).shutdown() mock_warn.warn.assert_called() def test_invalid_args_raise(randomstring): invalid_arg = f"abc_{randomstring()}" with pytest.raises(TypeError) as wrapped_err: - FuncXExecutor(**{invalid_arg: 1}).shutdown() + Executor(**{invalid_arg: 1}).shutdown() err = wrapped_err.value assert "invalid argument" in str(err) @@ -136,153 +137,153 @@ def test_invalid_args_raise(randomstring): def test_creates_default_client_if_none_provided(mocker): - mock_fxc_klass = mocker.patch("funcx.sdk.executor.FuncXClient") - FuncXExecutor().shutdown() + mock_gcc_klass = mocker.patch("globus_compute_sdk.sdk.executor.Client") + Executor().shutdown() - mock_fxc_klass.assert_called() + mock_gcc_klass.assert_called() -def test_executor_shutdown(fxexecutor): - fxc, fxe = fxexecutor - fxe.shutdown() +def test_executor_shutdown(gc_executor): + gcc, gce = gc_executor + gce.shutdown() - try_assert(_is_stopped(fxe._task_submitter)) - try_assert(_is_stopped(fxe._result_watcher)) + try_assert(_is_stopped(gce._task_submitter)) + try_assert(_is_stopped(gce._result_watcher)) -def test_executor_context_manager(fxexecutor): - fxc, fxe = fxexecutor - with fxe: +def test_executor_context_manager(gc_executor): + gcc, gce = gc_executor + with gce: pass - assert _is_stopped(fxe._task_submitter) - assert _is_stopped(fxe._result_watcher) + assert _is_stopped(gce._task_submitter) + assert _is_stopped(gce._result_watcher) -def test_property_task_group_id_is_isolated(fxexecutor): - fxc, fxe = fxexecutor - assert fxe.task_group_id != fxc.session_task_group_id +def test_property_task_group_id_is_isolated(gc_executor): + gcc, gce = gc_executor + assert gce.task_group_id != gcc.session_task_group_id - fxe.task_group_id = 
uuid.uuid4() - assert fxe.task_group_id != fxc.session_task_group_id + gce.task_group_id = uuid.uuid4() + assert gce.task_group_id != gcc.session_task_group_id -def test_multiple_register_function_fails(fxexecutor): - fxc, fxe = fxexecutor +def test_multiple_register_function_fails(gc_executor): + gcc, gce = gc_executor - fxc.register_function.return_value = "abc" + gcc.register_function.return_value = "abc" - fxe.register_function(noop) + gce.register_function(noop) with pytest.raises(ValueError): - fxe.register_function(noop) + gce.register_function(noop) - try_assert(lambda: fxe._stopped) + try_assert(lambda: gce._stopped) with pytest.raises(RuntimeError): - fxe.register_function(noop) + gce.register_function(noop) -def test_shortcut_register_function(fxexecutor): - fxc, fxe = fxexecutor +def test_shortcut_register_function(gc_executor): + gcc, gce = gc_executor fn_id = str(uuid.uuid4()) - fxe.register_function(noop, function_id=fn_id) + gce.register_function(noop, function_id=fn_id) with pytest.raises(ValueError): - fxe.register_function(noop, function_id=fn_id) + gce.register_function(noop, function_id=fn_id) - fxc.register_function.assert_not_called() + gcc.register_function.assert_not_called() -def test_failed_registration_shuts_down_executor(fxexecutor, randomstring): - fxc, fxe = fxexecutor +def test_failed_registration_shuts_down_executor(gc_executor, randomstring): + gcc, gce = gc_executor exc_text = randomstring() - fxc.register_function.side_effect = Exception(exc_text) + gcc.register_function.side_effect = Exception(exc_text) with pytest.raises(Exception) as wrapped_exc: - fxe.register_function(noop) + gce.register_function(noop) exc = wrapped_exc.value assert exc_text in str(exc) - try_assert(lambda: fxe._stopped) + try_assert(lambda: gce._stopped) -def test_submit_raises_if_thread_stopped(fxexecutor): - fxc, fxe = fxexecutor - fxe.shutdown() +def test_submit_raises_if_thread_stopped(gc_executor): + gcc, gce = gc_executor + gce.shutdown() - try_assert(_is_stopped(fxe._task_submitter), "Test prerequisite") + try_assert(_is_stopped(gce._task_submitter), "Test prerequisite") with pytest.raises(RuntimeError) as wrapped_exc: - fxe.submit(noop) + gce.submit(noop) err = wrapped_exc.value assert " is shutdown;" in str(err) -def test_submit_auto_registers_function(fxexecutor): - fxc, fxe = fxexecutor +def test_submit_auto_registers_function(gc_executor): + gcc, gce = gc_executor - fxc.register_function.return_value = "abc" - fxe.endpoint_id = "some_ep_id" - fxe.submit(noop) + gcc.register_function.return_value = "abc" + gce.endpoint_id = "some_ep_id" + gce.submit(noop) - assert fxc.register_function.called + assert gcc.register_function.called -def test_submit_value_error_if_no_endpoint(fxexecutor): - fxc, fxe = fxexecutor +def test_submit_value_error_if_no_endpoint(gc_executor): + gcc, gce = gc_executor with pytest.raises(ValueError) as pytest_exc: - fxe.submit(noop) + gce.submit(noop) err = pytest_exc.value assert "No endpoint_id set" in str(err) - assert " fxe = FuncXExecutor(endpoint_id=" in str(err), "Expected hint" - try_assert(_is_stopped(fxe._task_submitter), "Expected graceful shutdown on error") + assert " gce = Executor(endpoint_id=" in str(err), "Expected hint" + try_assert(_is_stopped(gce._task_submitter), "Expected graceful shutdown on error") -def test_same_function_different_containers_allowed(fxexecutor): - fxc, fxe = fxexecutor +def test_same_function_different_containers_allowed(gc_executor): + gcc, gce = gc_executor c1_id, c2_id = str(uuid.uuid4()), 
str(uuid.uuid4()) - fxe.container_id = c1_id - fxe.register_function(noop) - fxe.container_id = c2_id - fxe.register_function(noop) + gce.container_id = c1_id + gce.register_function(noop) + gce.container_id = c2_id + gce.register_function(noop) with pytest.raises(ValueError, match="already registered"): - fxe.register_function(noop) + gce.register_function(noop) -def test_map_raises(fxexecutor): - fxc, fxe = fxexecutor +def test_map_raises(gc_executor): + gcc, gce = gc_executor with pytest.raises(NotImplementedError): - fxe.map(noop) + gce.map(noop) @pytest.mark.parametrize("num_tasks", [0, 1, 2, 10]) -def test_reload_tasks_none_completed(fxexecutor, mocker, num_tasks): - fxc, fxe = fxexecutor +def test_reload_tasks_none_completed(gc_executor, mocker, num_tasks): + gcc, gce = gc_executor - mock_log = mocker.patch("funcx.sdk.executor.log") + mock_log = mocker.patch("globus_compute_sdk.sdk.executor.log") mock_data = { - "taskgroup_id": fxe.task_group_id, + "taskgroup_id": gce.task_group_id, "tasks": [{"id": uuid.uuid4()} for _ in range(num_tasks)], } mock_batch_result = {t["id"]: t for t in mock_data["tasks"]} mock_batch_result = mock.MagicMock(data={"results": mock_batch_result}) - fxc.web_client.get_taskgroup_tasks.return_value = mock_data - fxc.web_client.get_batch_status.return_value = mock_batch_result + gcc.web_client.get_taskgroup_tasks.return_value = mock_data + gcc.web_client.get_batch_status.return_value = mock_batch_result - client_futures = list(fxe.reload_tasks()) + client_futures = list(gce.reload_tasks()) if num_tasks == 0: log_args, log_kwargs = mock_log.warning.call_args assert "Received no tasks" in log_args[0] - assert fxe.task_group_id in log_args[0] + assert gce.task_group_id in log_args[0] else: assert not mock_log.warning.called @@ -291,19 +292,19 @@ def test_reload_tasks_none_completed(fxexecutor, mocker, num_tasks): @pytest.mark.parametrize("num_tasks", [1, 2, 10]) -def test_reload_tasks_some_completed(fxexecutor, mocker, num_tasks): - fxc, fxe = fxexecutor +def test_reload_tasks_some_completed(gc_executor, mocker, num_tasks): + gcc, gce = gc_executor - mock_log = mocker.patch("funcx.sdk.executor.log") + mock_log = mocker.patch("globus_compute_sdk.sdk.executor.log") mock_data = { - "taskgroup_id": fxe.task_group_id, + "taskgroup_id": gce.task_group_id, "tasks": [{"id": uuid.uuid4()} for _ in range(num_tasks)], } num_completed = random.randint(1, num_tasks) num_i = 0 - serialize = FuncXSerializer().serialize + serialize = ComputeSerializer().serialize mock_batch_result = {t["id"]: t for t in mock_data["tasks"]} for t_id in mock_batch_result: if num_i >= num_completed: @@ -314,14 +315,14 @@ def test_reload_tasks_some_completed(fxexecutor, mocker, num_tasks): mock_batch_result[t_id]["result"] = serialize("abc") mock_batch_result = mock.MagicMock(data={"results": mock_batch_result}) - fxc.web_client.get_taskgroup_tasks.return_value = mock_data - fxc.web_client.get_batch_status.return_value = mock_batch_result + gcc.web_client.get_taskgroup_tasks.return_value = mock_data + gcc.web_client.get_batch_status.return_value = mock_batch_result - client_futures = list(fxe.reload_tasks()) + client_futures = list(gce.reload_tasks()) if num_tasks == 0: log_args, log_kwargs = mock_log.warning.call_args assert "Received no tasks" in log_args[0] - assert fxe.task_group_id in log_args[0] + assert gce.task_group_id in log_args[0] else: assert not mock_log.warning.called @@ -329,14 +330,14 @@ def test_reload_tasks_some_completed(fxexecutor, mocker, num_tasks): assert sum(1 for fut in 
client_futures if fut.done()) == num_completed -def test_reload_tasks_all_completed(fxexecutor): - fxc, fxe = fxexecutor +def test_reload_tasks_all_completed(gc_executor): + gcc, gce = gc_executor - serialize = FuncXSerializer().serialize + serialize = ComputeSerializer().serialize num_tasks = 5 mock_data = { - "taskgroup_id": fxe.task_group_id, + "taskgroup_id": gce.task_group_id, "tasks": [ { "id": uuid.uuid4(), @@ -351,87 +352,87 @@ def test_reload_tasks_all_completed(fxexecutor): mock_batch_result = {t["id"]: t for t in mock_data["tasks"]} mock_batch_result = mock.MagicMock(data={"results": mock_batch_result}) - fxc.web_client.get_taskgroup_tasks.return_value = mock_data - fxc.web_client.get_batch_status.return_value = mock_batch_result + gcc.web_client.get_taskgroup_tasks.return_value = mock_data + gcc.web_client.get_batch_status.return_value = mock_batch_result - client_futures = list(fxe.reload_tasks()) + client_futures = list(gce.reload_tasks()) assert len(client_futures) == num_tasks assert sum(1 for fut in client_futures if fut.done()) == num_tasks - assert fxe._result_watcher is None, "Should NOT start watcher: all tasks done!" + assert gce._result_watcher is None, "Should NOT start watcher: all tasks done!" -def test_reload_starts_new_watcher(fxexecutor): - fxc, fxe = fxexecutor +def test_reload_starts_new_watcher(gc_executor): + gcc, gce = gc_executor num_tasks = 3 mock_data = { - "taskgroup_id": fxe.task_group_id, + "taskgroup_id": gce.task_group_id, "tasks": [{"id": uuid.uuid4()} for _ in range(num_tasks)], } mock_batch_result = {t["id"]: t for t in mock_data["tasks"]} mock_batch_result = mock.MagicMock(data={"results": mock_batch_result}) - fxc.web_client.get_taskgroup_tasks.return_value = mock_data - fxc.web_client.get_batch_status.return_value = mock_batch_result + gcc.web_client.get_taskgroup_tasks.return_value = mock_data + gcc.web_client.get_batch_status.return_value = mock_batch_result - client_futures = list(fxe.reload_tasks()) + client_futures = list(gce.reload_tasks()) assert len(client_futures) == num_tasks - try_assert(lambda: fxe._result_watcher.is_alive()) - watcher_1 = fxe._result_watcher + try_assert(lambda: gce._result_watcher.is_alive()) + watcher_1 = gce._result_watcher - client_futures = list(fxe.reload_tasks()) - try_assert(lambda: fxe._result_watcher.is_alive()) - watcher_2 = fxe._result_watcher + client_futures = list(gce.reload_tasks()) + try_assert(lambda: gce._result_watcher.is_alive()) + watcher_2 = gce._result_watcher assert watcher_1 is not watcher_2 -def test_reload_tasks_cancels_existing_futures(fxexecutor, randomstring): - fxc, fxe = fxexecutor +def test_reload_tasks_cancels_existing_futures(gc_executor, randomstring): + gcc, gce = gc_executor def mock_data(): return { - "taskgroup_id": fxe.task_group_id, + "taskgroup_id": gce.task_group_id, "tasks": [{"id": uuid.uuid4()} for i in range(random.randint(0, 20))], } - fxc.web_client.get_taskgroup_tasks.return_value = mock_data() + gcc.web_client.get_taskgroup_tasks.return_value = mock_data() - client_futures_1 = list(fxe.reload_tasks()) - fxc.get_taskgroup_tasks.return_value = mock_data() - client_futures_2 = list(fxe.reload_tasks()) + client_futures_1 = list(gce.reload_tasks()) + gcc.get_taskgroup_tasks.return_value = mock_data() + client_futures_2 = list(gce.reload_tasks()) assert all(fut.done() for fut in client_futures_1) assert all(fut.cancelled() for fut in client_futures_1) assert not any(fut.done() for fut in client_futures_2) -def 
test_reload_client_taskgroup_tasks_fails_gracefully(fxexecutor): - fxc, fxe = fxexecutor +def test_reload_client_taskgroup_tasks_fails_gracefully(gc_executor): + gcc, gce = gc_executor mock_datum = ( - (KeyError, {"mispeleed": fxe.task_group_id}), + (KeyError, {"mispeleed": gce.task_group_id}), (ValueError, {"taskgroup_id": "abcd"}), - (None, {"taskgroup_id": fxe.task_group_id}), + (None, {"taskgroup_id": gce.task_group_id}), ) for expected_exc_class, md in mock_datum: - fxc.web_client.get_taskgroup_tasks.return_value = md + gcc.web_client.get_taskgroup_tasks.return_value = md if expected_exc_class: with pytest.raises(expected_exc_class): - fxe.reload_tasks() + gce.reload_tasks() else: - fxe.reload_tasks() + gce.reload_tasks() -def test_reload_sets_failed_tasks(fxexecutor): - fxc, fxe = fxexecutor +def test_reload_sets_failed_tasks(gc_executor): + gcc, gce = gc_executor mock_data = { - "taskgroup_id": fxe.task_group_id, + "taskgroup_id": gce.task_group_id, "tasks": [ {"id": uuid.uuid4(), "completion_t": 1, "exception": "doh!"} for i in range(random.randint(0, 10)) @@ -441,21 +442,21 @@ def test_reload_sets_failed_tasks(fxexecutor): mock_batch_result = {t["id"]: t for t in mock_data["tasks"]} mock_batch_result = mock.MagicMock(data={"results": mock_batch_result}) - fxc.web_client.get_taskgroup_tasks.return_value = mock_data - fxc.web_client.get_batch_status.return_value = mock_batch_result + gcc.web_client.get_taskgroup_tasks.return_value = mock_data + gcc.web_client.get_batch_status.return_value = mock_batch_result - futs = list(fxe.reload_tasks()) + futs = list(gce.reload_tasks()) assert all(fut.done() for fut in futs) assert all("doh!" in str(fut.exception()) for fut in futs) -def test_reload_handles_deseralization_error_gracefully(fxexecutor): - fxc, fxe = fxexecutor - fxc.fx_serializer = FuncXSerializer() +def test_reload_handles_deseralization_error_gracefully(gc_executor): + gcc, gce = gc_executor + gcc.fx_serializer = ComputeSerializer() mock_data = { - "taskgroup_id": fxe.task_group_id, + "taskgroup_id": gce.task_group_id, "tasks": [ {"id": uuid.uuid4(), "completion_t": 1, "result": "a", "status": "success"} for i in range(random.randint(0, 10)) @@ -465,91 +466,91 @@ def test_reload_handles_deseralization_error_gracefully(fxexecutor): mock_batch_result = {t["id"]: t for t in mock_data["tasks"]} mock_batch_result = mock.MagicMock(data={"results": mock_batch_result}) - fxc.web_client.get_taskgroup_tasks.return_value = mock_data - fxc.web_client.get_batch_status.return_value = mock_batch_result + gcc.web_client.get_taskgroup_tasks.return_value = mock_data + gcc.web_client.get_batch_status.return_value = mock_batch_result - futs = list(fxe.reload_tasks()) + futs = list(gce.reload_tasks()) assert all(fut.done() for fut in futs) assert all("Failed to set " in str(fut.exception()) for fut in futs) @pytest.mark.parametrize("batch_size", tuple(range(1, 11))) -def test_task_submitter_respects_batch_size(fxexecutor, batch_size: int): - fxc, fxe = fxexecutor +def test_task_submitter_respects_batch_size(gc_executor, batch_size: int): + gcc, gce = gc_executor - fxc.create_batch.side_effect = mock.MagicMock - fxc.register_function.return_value = "abc" + gcc.create_batch.side_effect = mock.MagicMock + gcc.register_function.return_value = "abc" num_batches = 50 - fxe.endpoint_id = "some_ep_id" - fxe.batch_size = batch_size + gce.endpoint_id = "some_ep_id" + gce.batch_size = batch_size for _ in range(num_batches * batch_size): - fxe.submit(noop) - fxe.shutdown(cancel_futures=True) + gce.submit(noop) 
+ gce.shutdown(cancel_futures=True) - for args, _kwargs in fxc.batch_run.call_args_list: + for args, _kwargs in gcc.batch_run.call_args_list: batch, *_ = args assert batch.add.call_count <= batch_size def test_task_submitter_stops_executor_on_exception(): - fxe = MockedFuncXExecutor() - fxe._tasks_to_send.put(("too", "much", "destructuring", "!!")) + gce = MockedExecutor() + gce._tasks_to_send.put(("too", "much", "destructuring", "!!")) - try_assert(lambda: fxe._stopped) - try_assert(lambda: isinstance(fxe._task_submitter_exception, ValueError)) + try_assert(lambda: gce._stopped) + try_assert(lambda: isinstance(gce._task_submitter_exception, ValueError)) def test_task_submitter_stops_executor_on_upstream_error_response(randomstring): - fxe = MockedFuncXExecutor() + gce = MockedExecutor() upstream_error = Exception(f"Upstream error {randomstring}!!") - fxe.funcx_client.batch_run.side_effect = upstream_error - fxe.task_group_id = "abc" + gce.funcx_client.batch_run.side_effect = upstream_error + gce.task_group_id = "abc" tsi = TaskSubmissionInfo( task_num=12345, function_id="abc", endpoint_id="abc", args=(), kwargs={} ) - fxe._tasks_to_send.put((FuncXFuture(), tsi)) + gce._tasks_to_send.put((ComputeFuture(), tsi)) - try_assert(lambda: fxe._stopped) - try_assert(lambda: str(upstream_error) == str(fxe._task_submitter_exception)) + try_assert(lambda: gce._stopped) + try_assert(lambda: str(upstream_error) == str(gce._task_submitter_exception)) -def test_task_submitter_handles_stale_result_watcher_gracefully(fxexecutor, mocker): - fxc, fxe = fxexecutor - fxe.endpoint_id = "blah" +def test_task_submitter_handles_stale_result_watcher_gracefully(gc_executor, mocker): + gcc, gce = gc_executor + gce.endpoint_id = "blah" task_id = str(uuid.uuid4()) - fxc.batch_run.return_value = [task_id] - fxe.submit(noop) - try_assert(lambda: bool(fxe._result_watcher), "Test prerequisite") - try_assert(lambda: bool(fxe._result_watcher._open_futures), "Test prerequisite") - watcher_1 = fxe._result_watcher + gcc.batch_run.return_value = [task_id] + gce.submit(noop) + try_assert(lambda: bool(gce._result_watcher), "Test prerequisite") + try_assert(lambda: bool(gce._result_watcher._open_futures), "Test prerequisite") + watcher_1 = gce._result_watcher watcher_1._closed = True # simulate shutting down, but not yet stopped watcher_1._time_to_stop = True - fxe.submit(noop) - try_assert(lambda: fxe._result_watcher is not watcher_1, "Test prerequisite") + gce.submit(noop) + try_assert(lambda: gce._result_watcher is not watcher_1, "Test prerequisite") -def test_task_submitter_sets_future_task_ids(fxexecutor): - fxc, fxe = fxexecutor +def test_task_submitter_sets_future_task_ids(gc_executor): + gcc, gce = gc_executor num_tasks = random.randint(2, 20) - futs = [FuncXFuture() for _ in range(num_tasks)] + futs = [ComputeFuture() for _ in range(num_tasks)] batch_ids = [uuid.uuid4() for _ in range(num_tasks)] - fxc.batch_run.return_value = batch_ids - fxe._submit_tasks(futs, []) + gcc.batch_run.return_value = batch_ids + gce._submit_tasks(futs, []) assert all(f.task_id == task_id for f, task_id in zip(futs, batch_ids)) def test_resultwatcher_stops_if_unable_to_connect(mocker): - mock_time = mocker.patch("funcx.sdk.executor.time") - fxe = mock.Mock(spec=FuncXExecutor) - rw = _ResultWatcher(fxe) + mock_time = mocker.patch("globus_compute_sdk.sdk.executor.time") + gce = mock.Mock(spec=Executor) + rw = _ResultWatcher(gce) rw._connect = mock.Mock(return_value=mock.Mock(spec=pika.SelectConnection)) rw.run() @@ -558,23 +559,23 @@ def 
test_resultwatcher_stops_if_unable_to_connect(mocker): def test_resultwatcher_ignores_invalid_tasks(mocker): - fxe = mock.Mock(spec=FuncXExecutor) - rw = _ResultWatcher(fxe) + gce = mock.Mock(spec=Executor) + rw = _ResultWatcher(gce) rw._connect = mock.Mock(return_value=mock.Mock(spec=pika.SelectConnection)) - futs = [FuncXFuture() for i in range(random.randint(1, 10))] + futs = [ComputeFuture() for i in range(random.randint(1, 10))] futs[0].task_id = uuid.uuid4() num_added = rw.watch_for_task_results(futs) assert 1 == num_added def test_resultwatcher_cancels_futures_on_unexpected_stop(mocker): - mocker.patch("funcx.sdk.executor.time") - fxe = mock.Mock(spec=FuncXExecutor) - rw = _ResultWatcher(fxe) + mocker.patch("globus_compute_sdk.sdk.executor.time") + gce = mock.Mock(spec=Executor) + rw = _ResultWatcher(gce) rw._connect = mock.Mock(return_value=mock.Mock(spec=pika.SelectConnection)) - fut = FuncXFuture(task_id=uuid.uuid4()) + fut = ComputeFuture(task_id=uuid.uuid4()) rw.watch_for_task_results([fut]) rw.run() @@ -582,10 +583,10 @@ def test_resultwatcher_cancels_futures_on_unexpected_stop(mocker): def test_resultwatcher_gracefully_handles_unexpected_exception(mocker): - mocker.patch("funcx.sdk.executor.time") - mock_log = mocker.patch("funcx.sdk.executor.log") - fxe = mock.Mock(spec=FuncXExecutor) - rw = _ResultWatcher(fxe) + mocker.patch("globus_compute_sdk.sdk.executor.time") + mock_log = mocker.patch("globus_compute_sdk.sdk.executor.log") + gce = mock.Mock(spec=Executor) + rw = _ResultWatcher(gce) rw._connect = mock.Mock(return_value=mock.Mock(spec=pika.SelectConnection)) rw._event_watcher = mock.Mock(side_effect=Exception) @@ -597,7 +598,7 @@ def test_resultwatcher_gracefully_handles_unexpected_exception(mocker): def test_resultwatcher_blocks_until_tasks_done(): - fut = FuncXFuture(task_id=uuid.uuid4()) + fut = ComputeFuture(task_id=uuid.uuid4()) mrw = MockedResultWatcher(mock.Mock()) mrw.watch_for_task_results([fut]) mrw.start() @@ -612,7 +613,7 @@ def test_resultwatcher_blocks_until_tasks_done(): def test_resultwatcher_does_not_check_if_no_results(): - fut = FuncXFuture(task_id=uuid.uuid4()) + fut = ComputeFuture(task_id=uuid.uuid4()) mrw = MockedResultWatcher(mock.Mock()) mrw._match_results_to_futures = mock.Mock() mrw.watch_for_task_results([fut]) @@ -624,7 +625,7 @@ def test_resultwatcher_does_not_check_if_no_results(): def test_resultwatcher_checks_match_if_results(): - fut = FuncXFuture(task_id=uuid.uuid4()) + fut = ComputeFuture(task_id=uuid.uuid4()) res = Result(task_id=fut.task_id, data="abc123") mrw = MockedResultWatcher(mock.Mock()) @@ -666,8 +667,8 @@ def test_resultwatcher_repr(): def test_resultwatcher_match_sets_exception(randomstring): payload = randomstring() - fxs = FuncXSerializer() - fut = FuncXFuture(task_id=uuid.uuid4()) + fxs = ComputeSerializer() + fut = ComputeFuture(task_id=uuid.uuid4()) err_details = ResultErrorDetails(code="1234", user_message="some_user_message") res = Result(task_id=fut.task_id, error_details=err_details, data=payload) @@ -679,14 +680,14 @@ def test_resultwatcher_match_sets_exception(randomstring): mrw._event_watcher() assert payload in str(fut.exception()) - assert isinstance(fut.exception(), FuncxTaskExecutionFailed) + assert isinstance(fut.exception(), TaskExecutionFailed) mrw.shutdown() def test_resultwatcher_match_sets_result(randomstring): payload = randomstring() - fxs = FuncXSerializer() - fut = FuncXFuture(task_id=uuid.uuid4()) + fxs = ComputeSerializer() + fut = ComputeFuture(task_id=uuid.uuid4()) res = 
Result(task_id=fut.task_id, data=fxs.serialize(payload)) mrw = MockedResultWatcher(mock.Mock()) @@ -702,8 +703,8 @@ def test_resultwatcher_match_sets_result(randomstring): def test_resultwatcher_match_handles_deserialization_error(): invalid_payload = "invalidly serialized" - fxs = FuncXSerializer() - fut = FuncXFuture(task_id=uuid.uuid4()) + fxs = ComputeSerializer() + fut = ComputeFuture(task_id=uuid.uuid4()) res = Result(task_id=fut.task_id, data=invalid_payload) mrw = MockedResultWatcher(mock.Mock()) @@ -721,7 +722,7 @@ def test_resultwatcher_match_handles_deserialization_error(): @pytest.mark.parametrize("unpacked", ("not_a_Result", Exception)) def test_resultwatcher_onmessage_verifies_result_type(mocker, unpacked): - mock_unpack = mocker.patch("funcx.sdk.executor.messagepack.unpack") + mock_unpack = mocker.patch("globus_compute_sdk.sdk.executor.messagepack.unpack") mock_unpack.side_effect = unpacked mock_channel = mock.Mock() @@ -748,7 +749,7 @@ def test_resultwatcher_onmessage_sets_check_results_flag(): @pytest.mark.parametrize("exc", (MemoryError("some description"), "some description")) def test_resultwatcher_stops_loop_on_open_failure(mocker, exc): - mock_log = mocker.patch("funcx.sdk.executor.log", autospec=True) + mock_log = mocker.patch("globus_compute_sdk.sdk.executor.log", autospec=True) mrw = MockedResultWatcher(mock.Mock()) mrw.start() diff --git a/funcx_sdk/tests/unit/test_login_manager.py b/compute_sdk/tests/unit/test_login_manager.py similarity index 92% rename from funcx_sdk/tests/unit/test_login_manager.py rename to compute_sdk/tests/unit/test_login_manager.py index 26e5b7382..69f5da5d0 100644 --- a/funcx_sdk/tests/unit/test_login_manager.py +++ b/compute_sdk/tests/unit/test_login_manager.py @@ -6,19 +6,18 @@ import globus_sdk import pytest import requests - -from funcx.sdk._environments import _get_envname -from funcx.sdk.login_manager import LoginManager, requires_login -from funcx.sdk.login_manager.client_login import ( +from globus_compute_sdk.sdk._environments import _get_envname +from globus_compute_sdk.sdk.login_manager import LoginManager, requires_login +from globus_compute_sdk.sdk.login_manager.client_login import ( _get_client_creds_from_env, get_client_login, is_client_login, ) -from funcx.sdk.login_manager.tokenstore import _resolve_namespace +from globus_compute_sdk.sdk.login_manager.tokenstore import _resolve_namespace CID_KEY = "FUNCX_SDK_CLIENT_ID" CSC_KEY = "FUNCX_SDK_CLIENT_SECRET" -MOCK_BASE = "funcx.sdk.login_manager" +MOCK_BASE = "globus_compute_sdk.sdk.login_manager" def _fake_http_response(*, status: int = 200, method: str = "GET") -> requests.Response: @@ -207,8 +206,8 @@ def test_requires_login_decorator(mocker, logman): mocked_run_login_flow = mocker.patch( f"{MOCK_BASE}.manager.LoginManager.run_login_flow" ) - mocked_get_funcx_web_client = mocker.patch( - f"{MOCK_BASE}.manager.LoginManager.get_funcx_web_client" + mocked_get_web_client = mocker.patch( + f"{MOCK_BASE}.manager.LoginManager.get_web_client" ) expected = "expected result" @@ -230,9 +229,9 @@ class MockClient: res = mock_client.upstream_call(None) # case: no need to reauth assert res == expected assert not mocked_run_login_flow.called - assert not mocked_get_funcx_web_client.called + assert not mocked_get_web_client.called res = mock_client.upstream_call(None) # case: now must reauth assert res == expected assert mocked_run_login_flow.called - assert mocked_get_funcx_web_client.called + assert mocked_get_web_client.called diff --git a/funcx_sdk/tests/unit/test_printing.py 
b/compute_sdk/tests/unit/test_printing.py similarity index 96% rename from funcx_sdk/tests/unit/test_printing.py rename to compute_sdk/tests/unit/test_printing.py index 062aa8aeb..a710056d0 100644 --- a/funcx_sdk/tests/unit/test_printing.py +++ b/compute_sdk/tests/unit/test_printing.py @@ -2,8 +2,7 @@ from contextlib import redirect_stdout import pytest - -from funcx.sdk.utils.printing import print_table +from globus_compute_sdk.sdk.utils.printing import print_table @pytest.mark.parametrize( diff --git a/funcx_sdk/tests/unit/test_version_parse.py b/compute_sdk/tests/unit/test_version_parse.py similarity index 86% rename from funcx_sdk/tests/unit/test_version_parse.py rename to compute_sdk/tests/unit/test_version_parse.py index 2f3138f45..15d3a2ff7 100644 --- a/funcx_sdk/tests/unit/test_version_parse.py +++ b/compute_sdk/tests/unit/test_version_parse.py @@ -1,7 +1,6 @@ import pytest - -from funcx.errors import VersionMismatch -from funcx.version import compare_versions +from globus_compute_sdk.errors import VersionMismatch +from globus_compute_sdk.version import compare_versions @pytest.mark.parametrize( diff --git a/funcx_sdk/tests/unit/test_web_client.py b/compute_sdk/tests/unit/test_web_client.py similarity index 87% rename from funcx_sdk/tests/unit/test_web_client.py rename to compute_sdk/tests/unit/test_web_client.py index be369eafe..6665a8737 100644 --- a/funcx_sdk/tests/unit/test_web_client.py +++ b/compute_sdk/tests/unit/test_web_client.py @@ -2,9 +2,8 @@ import pytest import responses - -from funcx.sdk.web_client import FuncxWebClient -from funcx.version import __version__ +from globus_compute_sdk.sdk.web_client import WebClient +from globus_compute_sdk.version import __version__ @pytest.fixture(autouse=True) @@ -24,14 +23,12 @@ def mocked_responses(): @pytest.fixture def client(): # for the default test client, set a fake URL and disable retries - return FuncxWebClient( - base_url="https://api.funcx", transport_params={"max_retries": 0} - ) + return WebClient(base_url="https://api.funcx", transport_params={"max_retries": 0}) def test_web_client_can_set_explicit_base_url(): - c1 = FuncxWebClient(base_url="https://foo.example.com/") - c2 = FuncxWebClient(base_url="https://bar.example.com/") + c1 = WebClient(base_url="https://foo.example.com/") + c2 = WebClient(base_url="https://bar.example.com/") assert c1.base_url == "https://foo.example.com/" assert c2.base_url == "https://bar.example.com/" @@ -60,7 +57,7 @@ def test_get_version_service_param(client, service_param): @pytest.mark.parametrize("user_app_name", [None, "bar"]) def test_app_name_from_constructor(user_app_name): - client = FuncxWebClient( + client = WebClient( # use the same fake URL and disable retries as in the default test case base_url="https://api.funcx", transport_params={"max_retries": 0}, @@ -70,7 +67,7 @@ def test_app_name_from_constructor(user_app_name): assert client.user_app_name == user_app_name assert __version__ in client.app_name - assert "funcx" in client.app_name + assert "globus-compute-sdk" in client.app_name if user_app_name: assert user_app_name in client.app_name @@ -81,7 +78,7 @@ def test_user_app_name_property(client, user_app_name): assert client.user_app_name == user_app_name assert __version__ in client.app_name - assert "funcx" in client.app_name + assert "globus-compute-sdk" in client.app_name if user_app_name: assert user_app_name in client.app_name diff --git a/funcx_sdk/tests/unit/test_whoami.py b/compute_sdk/tests/unit/test_whoami.py similarity index 91% rename from 
funcx_sdk/tests/unit/test_whoami.py rename to compute_sdk/tests/unit/test_whoami.py index 354d7aae2..d807fd39d 100644 --- a/funcx_sdk/tests/unit/test_whoami.py +++ b/compute_sdk/tests/unit/test_whoami.py @@ -1,9 +1,8 @@ +import globus_compute_sdk.sdk.login_manager import pytest +from globus_compute_sdk.sdk.login_manager.whoami import print_whoami_info -import funcx.sdk.login_manager -from funcx.sdk.login_manager.whoami import print_whoami_info - -MOCK_BASE = "funcx.sdk.login_manager.whoami" +MOCK_BASE = "globus_compute_sdk.sdk.login_manager.whoami" @pytest.mark.parametrize( @@ -87,7 +86,7 @@ def test_whoami(response_output, mocker, monkeypatch): oa_mock.return_value.oauth2_userinfo.return_value = resp oa_mock.return_value.get_identities.return_value = profile monkeypatch.setattr( - funcx.sdk.login_manager.LoginManager, "get_auth_client", oa_mock + globus_compute_sdk.sdk.login_manager.LoginManager, "get_auth_client", oa_mock ) if has_err: diff --git a/funcx_sdk/tests/unit/test_ws_poller.py b/compute_sdk/tests/unit/test_ws_poller.py similarity index 81% rename from funcx_sdk/tests/unit/test_ws_poller.py rename to compute_sdk/tests/unit/test_ws_poller.py index daa43bffa..4b897bb9f 100644 --- a/funcx_sdk/tests/unit/test_ws_poller.py +++ b/compute_sdk/tests/unit/test_ws_poller.py @@ -3,9 +3,9 @@ import random import uuid -from funcx.sdk.asynchronous.funcx_future import FuncXFuture -from funcx.sdk.asynchronous.ws_polling_task import WebSocketPollingTask -from funcx.sdk.executor import AtomicController +from globus_compute_sdk.sdk.asynchronous.compute_future import ComputeFuture +from globus_compute_sdk.sdk.asynchronous.ws_polling_task import WebSocketPollingTask +from globus_compute_sdk.sdk.executor import AtomicController def _start(): @@ -17,10 +17,10 @@ def _stop(): def test_close_with_null_ws_state(mocker): - fxclient = mocker.MagicMock() + client = mocker.MagicMock() eventloop = asyncio.new_event_loop() wspt = WebSocketPollingTask( - funcx_client=fxclient, + funcx_client=client, loop=eventloop, atomic_controller=AtomicController(_start, _stop), auto_start=False, @@ -44,16 +44,16 @@ def test_polling_task_cancels_futures_upon_upstream_failure(mocker): ) mock_data_iter = iter(mock_data) tids = (md.get("task_id", uuid.uuid4()) for md in mock_data) - pending_futures = {tid: FuncXFuture(tid) for tid in tids} + pending_futures = {tid: ComputeFuture(tid) for tid in tids} futures = list(pending_futures.values()) async def mock_recv(): return json.dumps(next(mock_data_iter)) - fxclient = mocker.MagicMock() + client = mocker.MagicMock() eventloop = asyncio.new_event_loop() wspt = WebSocketPollingTask( - funcx_client=fxclient, + funcx_client=client, loop=eventloop, atomic_controller=AtomicController(_start, _stop), auto_start=False, @@ -73,15 +73,15 @@ async def mock_recv(): def test_malformed_response_handled_gracefully(mocker): - fxclient = mocker.MagicMock() + client = mocker.MagicMock() eventloop = asyncio.new_event_loop() wspt = WebSocketPollingTask( - funcx_client=fxclient, + funcx_client=client, loop=eventloop, atomic_controller=AtomicController(_start, _stop), auto_start=False, ) - task_fut = FuncXFuture() + task_fut = ComputeFuture() data = {"reason": "Jim bob Bonita Mae"} eventloop.run_until_complete(wspt.set_result(task_fut, data)) eventloop.close() diff --git a/funcx_sdk/tests/utils.py b/compute_sdk/tests/utils.py similarity index 100% rename from funcx_sdk/tests/utils.py rename to compute_sdk/tests/utils.py diff --git a/funcx_sdk/tox.ini b/compute_sdk/tox.ini similarity index 86% rename 
from funcx_sdk/tox.ini
rename to compute_sdk/tox.ini
index 9723e07aa..ec4ae67a8 100644
--- a/funcx_sdk/tox.ini
+++ b/compute_sdk/tox.ini
@@ -4,7 +4,7 @@ skip_missing_interpreters = true
 
 [testenv]
 passenv =
-    FUNCX_INTEGRATION_TEST_WEB_URL
+    COMPUTE_INTEGRATION_TEST_WEB_URL
 usedevelop = true
 extras = test
 commands =
@@ -14,7 +14,7 @@ commands =
 
 [testenv:mypy]
 deps = mypy
-commands = mypy funcx/
+commands = mypy globus_compute_sdk/
 
 [testenv:publish-release]
 skip_install = true
diff --git a/docs/Tutorial.rst b/docs/Tutorial.rst
index aef1dcad6..6439190c0 100644
--- a/docs/Tutorial.rst
+++ b/docs/Tutorial.rst
@@ -1,32 +1,32 @@
-funcX Tutorial
-==============
+Globus Compute Tutorial
+=======================
 
-funcX is a Function-as-a-Service (FaaS) platform for science that enables you to register functions in a cloud-hosted service and then reliably execute those functions on a remote funcX endpoint.
-This tutorial is configured to use a tutorial endpoint hosted by the funcX team.
+Globus Compute is a Function-as-a-Service (FaaS) platform for science that enables you to register functions in a cloud-hosted service and then reliably execute those functions on a remote Globus Compute endpoint.
+This tutorial is configured to use a tutorial endpoint hosted by the Globus Compute Team.
 You can setup and use your own endpoint by following the `endpoint documentation `_.
 
-funcX Python SDK
-----------------
+Globus Compute Python SDK
+-------------------------
 
-The funcX Python SDK provides programming abstractions for interacting with the funcX service. Before running this tutorial you should first install the funcX SDK in its own `venv `_ environment:
+The Globus Compute Python SDK provides programming abstractions for interacting with the Globus Compute service. Before running this tutorial you should first install the Globus Compute SDK in its own `venv `_ environment:
 
 .. code-block:: bash
 
-    $ python3 -m venv path/to/funcx_venv
-    $ source path/to/funcx_venv/bin/activate
-    (funcx_venv) $ python3 -m pip install funcx
+    $ python3 -m venv path/to/globus_compute_venv
+    $ source path/to/globus_compute_venv/bin/activate
+    (globus_compute_venv) $ python3 -m pip install globus-compute-sdk
 
-The funcX SDK exposes a ``FuncXClient`` object for all interactions with the funcX service.
-In order to use the funcX service you must first authenticate using one of hundreds of supported identity provides (e.g., your institution, ORCID, Google).
+The Globus Compute SDK exposes a ``Client`` object for all interactions with the Globus Compute service.
+In order to use the Globus Compute service you must first authenticate using one of hundreds of supported identity providers (e.g., your institution, ORCID, Google).
 
-As part of the authentication process you must grant permission for funcX to access your identity information (to retrieve your email address) and Globus Groups management access (to share endpoints).
+As part of the authentication process you must grant permission for Globus Compute to access your identity information (to retrieve your email address) and Globus Groups management access (to share functions and endpoints).
 
 .. code-block:: python
 
-    from funcx import FuncXClient
+    from globus_compute_sdk import Client
 
-    fxc = FuncXClient()
+    gcc = Client()
 
 Basic Usage
 -----------
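For non-interactive deployments, the login-manager changes earlier in this diff (``compute_sdk/tests/unit/test_login_manager.py``) show that the SDK still reads the ``FUNCX_SDK_CLIENT_ID`` and ``FUNCX_SDK_CLIENT_SECRET`` environment variables for client-credential logins. A minimal sketch, assuming that setting both variables before constructing the client is enough to trigger the client-login path exercised by ``is_client_login``:

.. code-block:: python

    import os

    # Hypothetical client credentials; real values come from a Globus Auth
    # confidential-client registration (assumption, not shown in this diff).
    os.environ["FUNCX_SDK_CLIENT_ID"] = "11111111-2222-3333-4444-555555555555"
    os.environ["FUNCX_SDK_CLIENT_SECRET"] = "my-client-secret"

    from globus_compute_sdk import Client

    gcc = Client()  # assumed to detect the env vars and skip the browser flow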
@@ -36,18 +36,18 @@ The following example demonstrates how you can register and execute a function.
 Registering a Function
 ~~~~~~~~~~~~~~~~~~~~~~
 
-funcX works like any other FaaS platform, you must first register a function with funcX before being able to execute it on a remote endpoint.
-The registration process will serialize the function body and store it securely in the funcX service.
+Globus Compute works like any other FaaS platform: you must first register a function with Globus Compute before being able to execute it on a remote endpoint.
+The registration process will serialize the function body and store it securely in the Globus Compute service.
 As we will see below, you may share functions with others and discover functions shared with you.
-Upon registration funcX will return a UUID for the function. This UUID can then be used to manage and invoke the function.
+Upon registration, Globus Compute will return a UUID for the function. This UUID can then be used to manage and invoke the function.
 
 .. code-block:: python
 
     def hello_world():
         return "Hello World!"
 
-    func_uuid = fxc.register_function(hello_world)
+    func_uuid = gcc.register_function(hello_world)
     print(func_uuid)
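The serialization that ``register_function`` performs is exposed by the SDK as ``ComputeSerializer`` (renamed from ``FuncXSerializer``, as the test changes earlier in this diff show). A quick round-trip sketch, assuming ``deserialize`` is the inverse of the ``serialize`` method used in those tests:

.. code-block:: python

    from globus_compute_sdk.serialize import ComputeSerializer

    serde = ComputeSerializer()
    packed = serde.serialize(hello_world)  # roughly what registration stores
    unpacked = serde.deserialize(packed)   # assumed inverse of serialize
    print(unpacked())                      # -> "Hello World!"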
@@ -55,47 +55,47 @@ Running a Function
 ~~~~~~~~~~~~~~~~~~
 
 To invoke a function, you must provide a) the function's UUID; and b) the ``endpoint_id`` of the endpoint on which you wish to execute that function.
-Note: here we use the public funcX tutorial endpoint, you may change the ``endpoint_id`` to the UUID of any endpoint for which you have permission to execute functions.
+Note: here we use the public Globus Compute tutorial endpoint; you may change the ``endpoint_id`` to the UUID of any endpoint for which you have permission to execute functions.
 
-funcX functions are designed to be executed remotely and asynchrously.
+Globus Compute functions are designed to be executed remotely and asynchronously.
 To avoid synchronous invocation, the result of a function invocation (called a ``task``) is a UUID which may be introspected to monitor execution status and retrieve results.
-The funcX service will manage the reliable execution of a task, for example by qeueing tasks when the endpoint is busy or offline and retrying tasks in case of node failures.
+The Globus Compute service will manage the reliable execution of a task, for example by queueing tasks when the endpoint is busy or offline and retrying tasks in case of node failures.
 
 .. code-block:: python
 
    tutorial_endpoint = '4b116d3c-1703-4f8f-9f6f-39921e5864df' # Public tutorial endpoint
-   res = fxc.run(endpoint_id=tutorial_endpoint, function_id=func_uuid)
+   res = gcc.run(endpoint_id=tutorial_endpoint, function_id=func_uuid)
    print(res)
 
 Retrieving Results
 ~~~~~~~~~~~~~~~~~~
 
-When the task has completed executing you can access the results via the funcX client as follows.
+When the task has completed executing, you can access the results via the Globus Compute client as follows.
 
 .. code-block:: python
 
-   fxc.get_result(res)
+   gcc.get_result(res)
 
 Functions with Arguments
 ~~~~~~~~~~~~~~~~~~~~~~~~
 
-funcX supports registration and invocation of functions with arbitrary arguments and returned parameters.
-funcX will serialize any ``*args`` and ``**kwargs`` when invoking a function and it will serialize any return parameters or exceptions.
+Globus Compute supports registration and invocation of functions with arbitrary arguments and returned parameters.
+Globus Compute will serialize any ``*args`` and ``**kwargs`` when invoking a function, and it will serialize any return parameters or exceptions.
 
 .. note::
 
-   funcX uses standard Python serilaization libraries (e.g., Pickle, Dill) it also limits the size of input arguments and returned parameters to 5MB.
+   Globus Compute uses standard Python serialization libraries (e.g., Pickle, Dill); it also limits the size of input arguments and returned parameters to 5MB.
 
 The following example shows a function that computes the sum of a list of input arguments. First we register the function as above.
 
 .. code-block:: python
 
-   def funcx_sum(items):
+   def get_sum(items):
        return sum(items)
 
-   sum_function = fxc.register_function(funcx_sum)
+   sum_function = gcc.register_function(get_sum)
 
 When invoking the function you can pass in arguments like any other function, either by position or with keyword arguments.
 
@@ -103,62 +103,62 @@ When invoking the function you can pass in arguments like any other function, ei
 
    items = [1, 2, 3, 4, 5]
 
-   res = fxc.run(items, endpoint_id=tutorial_endpoint, function_id=sum_function)
+   res = gcc.run(items, endpoint_id=tutorial_endpoint, function_id=sum_function)
 
-   print (fxc.get_result(res))
+   print (gcc.get_result(res))
 
 Functions with Dependencies
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-funcX requires that functions explictly state all dependencies within the function body.
+Globus Compute requires that functions explicitly state all dependencies within the function body.
 It also assumes that the dependent libraries are available on the endpoint in which the function will execute.
 For example, in the following function we import from ``datetime``:
 
 .. code-block:: python
 
-   def funcx_date():
+   def get_date():
        from datetime import date
        return date.today()
 
-   date_function = fxc.register_function(funcx_date)
+   date_function = gcc.register_function(get_date)
 
-   res = fxc.run(endpoint_id=tutorial_endpoint, function_id=date_function)
+   res = gcc.run(endpoint_id=tutorial_endpoint, function_id=date_function)
 
-   print (fxc.get_result(res))
+   print (gcc.get_result(res))
 
 Calling External Applications
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Depending on the configuration of the funcX endpoint you can often invoke external applications that are avaialble in the endpoint environment.
+Depending on the configuration of the Globus Compute endpoint you can often invoke external applications that are available in the endpoint environment.
 
 .. code-block:: python
 
-   def funcx_echo(name):
+   def echo(name):
        import os
        return os.popen("echo Hello %s" % name).read()
 
-   echo_function = fxc.register_function(funcx_echo)
+   echo_function = gcc.register_function(echo)
 
-   res = fxc.run("World", endpoint_id=tutorial_endpoint, function_id=echo_function)
+   res = gcc.run("World", endpoint_id=tutorial_endpoint, function_id=echo_function)
 
-   print (fxc.get_result(res))
+   print (gcc.get_result(res))
 
 Catching Exceptions
 ~~~~~~~~~~~~~~~~~~~
 
-When functions fail, the exception is captured and serialized by the funcX endpoint, and reraised when you try to get the result.
-In the following example, the "deterministic failure" exception is raised when ``fxc.get_result`` is called on the failing function.
+When functions fail, the exception is captured and serialized by the Globus Compute endpoint, and reraised when you try to get the result.
+In the following example, the "deterministic failure" exception is raised when ``gcc.get_result`` is called on the failing function.
 
 .. code-block:: python
 
    def failing():
       raise Exception("deterministic failure")
 
-   failing_function = fxc.register_function(failing)
+   failing_function = gcc.register_function(failing)
 
-   res = fxc.run(endpoint_id=tutorial_endpoint, function_id=failing_function)
+   res = gcc.run(endpoint_id=tutorial_endpoint, function_id=failing_function)
 
-   fxc.get_result(res)
+   gcc.get_result(res)
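Rather than calling ``get_result`` blindly, a common pattern is to poll the task's status first. A small sketch, assuming ``get_task`` returns a dictionary with a ``pending`` field, as in the SDK unit tests in this diff and the Monte Carlo example below:

.. code-block:: python

    import time

    # Wait until the service reports the task is no longer pending,
    # then fetch its outcome (or re-raise its serialized exception).
    while gcc.get_task(res)['pending']:
        time.sleep(3)
    print(gcc.get_result(res))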
 Running Functions Many Times
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -184,23 +184,23 @@ Thus, if N uniformly-distributed random points are dropped within the square, ap
        return (inside*4 / num_points)
 
    # register the function
-   pi_function = fxc.register_function(pi)
+   pi_function = gcc.register_function(pi)
 
    # execute the function 3 times
    estimates = []
    for i in range(3):
-       estimates.append(fxc.run(10**5, endpoint_id=tutorial_endpoint, function_id=pi_function))
+       estimates.append(gcc.run(10**5, endpoint_id=tutorial_endpoint, function_id=pi_function))
 
    # wait for tasks to complete
    time.sleep(5)
 
    # wait for all tasks to complete
    for e in estimates:
-       while fxc.get_task(e)['pending'] == 'True':
+       while gcc.get_task(e)['pending'] == 'True':
           time.sleep(3)
 
    # get the results and calculate the total
-   results = [fxc.get_result(i) for i in estimates]
+   results = [gcc.get_result(i) for i in estimates]
    total = 0
    for r in results:
       total += r
 
@@ -212,13 +212,13 @@ Managing Endpoints
 ~~~~~~~~~~~~~~~~~~
 
-funcX endpoints advertise whether or not they are online as well as information about their avaialble resources, queued tasks, and other information.
+Globus Compute endpoints advertise whether or not they are online as well as information about their available resources, queued tasks, and other information.
 If you are permitted to execute functions on an endpoint you can also retrieve the status of the endpoint.
-The following example shows how to look up the status (online or offline) and the number of number of waiting tasks and workers connected to the endpoint.
+The following example shows how to look up the status (online or offline) and the number of waiting tasks and workers connected to the endpoint.
 
 .. code-block:: python
 
-   endpoint_status = fxc.get_endpoint_status(tutorial_endpoint)
+   endpoint_status = gcc.get_endpoint_status(tutorial_endpoint)
 
    print("Status: %s" % endpoint_status['status'])
    print("Workers: %s" % endpoint_status['logs'][0]['total_workers'])
 
@@ -227,16 +227,16 @@ The following example shows how to look up the status (online or offline) and th
 Advanced Features
 -----------------
 
-funcX provides several features that address more advanced use cases.
+Globus Compute provides several features that address more advanced use cases.
 
 Running Batches
 ~~~~~~~~~~~~~~~
 
-After registering a function, you might want to invoke that function many times without making individual calls to the funcX service.
+After registering a function, you might want to invoke that function many times without making individual calls to the Globus Compute service.
 Such examples occur when running monte carlo simulations, ensembles, and parameter sweep applications.
 
-funcX provides a batch interface which enables specification of a range of function invocations.
-To use this interface you must create a funcX batch object and then add each invocation to that object.
+Globus Compute provides a batch interface which enables specification of a range of function invocations.
+To use this interface you must create a Globus Compute batch object and then add each invocation to that object.
 You can then pass the constructed object to the ``batch_run`` interface.
 
 .. code-block:: python
 
@@ -244,18 +244,18 @@ You can then pass the constructed object to the ``batch_run`` interface.
    def squared(x):
        return x**2
 
-   squared_function = fxc.register_function(squared)
+   squared_function = gcc.register_function(squared)
 
    inputs = list(range(10))
-   batch = fxc.create_batch()
+   batch = gcc.create_batch()
 
    for x in inputs:
        batch.add(x, endpoint_id=tutorial_endpoint, function_id=squared_function)
 
-   batch_res = fxc.batch_run(batch)
+   batch_res = gcc.batch_run(batch)
 
-Similary, funcX provides an interface to retrieve the status of the entire batch of invocations.
+Similarly, Globus Compute provides an interface to retrieve the status of the entire batch of invocations.
 
 .. code-block:: python
 
-   fxc.get_batch_result(batch_res)
+   gcc.get_batch_result(batch_res)
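The batch interface is not the only high-level entry point this diff touches: ``compute_sdk/tests/unit/test_executor.py`` above exercises the renamed ``Executor`` (formerly ``FuncXExecutor``), which wraps submission in futures. A sketch of the same squared-numbers workload, assuming ``submit`` forwards extra arguments to the function and returns futures with the usual ``result()`` semantics:

.. code-block:: python

    from globus_compute_sdk import Executor

    # The context-manager form and the endpoint_id keyword both appear in the
    # executor tests above; argument forwarding via submit() is assumed.
    with Executor(endpoint_id=tutorial_endpoint) as gce:
        futures = [gce.submit(squared, x) for x in range(10)]
        print([f.result() for f in futures])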
@@ -227,16 +227,16 @@ Advanced Features ----------------- -funcX provides several features that address more advanced use cases. +Globus Compute provides several features that address more advanced use cases. Running Batches ~~~~~~~~~~~~~~~ -After registering a function, you might want to invoke that function many times without making individual calls to the funcX service. +After registering a function, you might want to invoke that function many times without making individual calls to the Globus Compute service. Such examples occur when running monte carlo simulations, ensembles, and parameter sweep applications. -funcX provides a batch interface which enables specification of a range of function invocations. -To use this interface you must create a funcX batch object and then add each invocation to that object. +Globus Compute provides a batch interface which enables specification of a range of function invocations. +To use this interface you must create a Globus Compute batch object and then add each invocation to that object. You can then pass the constructed object to the ``batch_run`` interface. .. code-block:: python @@ -244,18 +244,18 @@ You can then pass the constructed object to the ``batch_run`` interface. def squared(x): return x**2 - squared_function = fxc.register_function(squared) + squared_function = gcc.register_function(squared) inputs = list(range(10)) - batch = fxc.create_batch() + batch = gcc.create_batch() for x in inputs: batch.add(x, endpoint_id=tutorial_endpoint, function_id=squared_function) - batch_res = fxc.batch_run(batch) + batch_res = gcc.batch_run(batch) -Similary, funcX provides an interface to retrieve the status of the entire batch of invocations. +Similarly, Globus Compute provides an interface to retrieve the status of the entire batch of invocations. .. code-block:: python - fxc.get_batch_result(batch_res) + gcc.get_batch_result(batch_res) diff --git a/docs/images/ALCF_Polaris.jpeg b/docs/_static/images/ALCF_Polaris.jpeg similarity index 100% rename from docs/images/ALCF_Polaris.jpeg rename to docs/_static/images/ALCF_Polaris.jpeg diff --git a/docs/_static/images/compute-model.png b/docs/_static/images/compute-model.png new file mode 100644 index 000000000..2fe857fe4 Binary files /dev/null and b/docs/_static/images/compute-model.png differ diff --git a/docs/_static/images/compute-vertical.svg b/docs/_static/images/compute-vertical.svg new file mode 100644 index 000000000..b3ed243b2 --- /dev/null +++ b/docs/_static/images/compute-vertical.svg @@ -0,0 +1,28 @@ + + + + + diff --git a/docs/_static/images/funcX-dark-cropped.png b/docs/_static/images/funcX-dark-cropped.png deleted file mode 100644 index b1d7a04e3..000000000 Binary files a/docs/_static/images/funcX-dark-cropped.png and /dev/null differ diff --git a/docs/_static/images/funcX-light-cropped.png b/docs/_static/images/funcX-light-cropped.png deleted file mode 100644 index 6cad947a0..000000000 Binary files a/docs/_static/images/funcX-light-cropped.png and /dev/null differ diff --git a/docs/_static/images/globus-300x300-blue.png b/docs/_static/images/globus-300x300-blue.png new file mode 100644 index 000000000..2bdb50854 Binary files /dev/null and b/docs/_static/images/globus-300x300-blue.png differ diff --git a/docs/_static/logo.png b/docs/_static/logo.png deleted file mode 100644 index 5f4eb79bb..000000000 Binary files a/docs/_static/logo.png and /dev/null differ diff --git a/docs/actionprovider.rst b/docs/actionprovider.rst index 0d3d50e10..d831bc3e2 100644 --- a/docs/actionprovider.rst +++ b/docs/actionprovider.rst @@ -1,10 +1,10 @@ Globus Flows Action Provider ============================ -funcX exposes an asynchronous `Action Provider `_ +Globus Compute exposes an asynchronous `Action Provider `_ interface to allow functions to be used in a `Globus Flow `_. -The funcX Action Provider interface uses: +The Globus Compute Action Provider interface uses: * ``ActionUrl`` -- 'https://automate.funcx.org' * ``ActionScope`` -- 'https://auth.globus.org/scopes/b3db7e59-a6f1-4947-95c2-59d6b7a70f8c/action_all' @@ -18,12 +18,12 @@ The endpoint and function arguments are UUIDs and the payload is a dictionary of .. code-block:: - 'tasks': [{'endpoint.$': '', - 'function': '', + 'tasks': [{'endpoint.$': '', + 'function': '', 'payload.$': ''}], -When defining a funcX function to use within a flow it is recommended to define the specific kwargs that will be passed in as payload. +When defining a Globus Compute function to use within a flow it is recommended to define the specific kwargs that will be passed in as payload.
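For instance, a flow-friendly function might name its payload parameters explicitly (the names here are purely illustrative):

.. code-block:: python

    def process_sample(sample_id, output_path="/tmp/out.json"):
        # explicit kwargs double as documentation of the expected flow payload
        return {"sample": sample_id, "written_to": output_path}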
If the kwargs are not known, a function can be defined to accept arbitrary kwargs using the ``**`` operator, e.g.: .. code-block:: @@ -40,7 +40,7 @@ Gladier ------- The `Gladier `_ toolkit provides useful tools to simplify and accelerate -the development of flows that use funcX. For example, Gladier validates inputs prior to starting a flow and will re-register +the development of flows that use Globus Compute. For example, Gladier validates inputs prior to starting a flow and will re-register functions when they are modified. Additionally, it includes capabilities to automatically generate flow definitions. diff --git a/docs/conf.py b/docs/conf.py index 1125af2cb..f805f11ba 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,19 +1,19 @@ -import funcx # noqa:E402 +import globus_compute_sdk # noqa:E402 # -- Project information ----------------------------------------------------- -project = "funcX" +project = "Globus Compute" copyright = "2019, The University of Chicago" -author = "The funcX Team" +author = "The Globus Compute Team" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = funcx.__version__.rsplit(".", 1)[0] +version = globus_compute_sdk.__version__.rsplit(".", 1)[0] # The full version, including alpha/beta/rc tags. -release = funcx.__version__ +release = globus_compute_sdk.__version__ # -- General configuration --------------------------------------------------- @@ -44,7 +44,9 @@ html_show_sourcelink = True html_theme = "furo" html_static_path = ["_static"] + +# Potentially different pngs for light/dark in the future html_theme_options = { - "light_logo": "images/funcX-light-cropped.png", - "dark_logo": "images/funcX-dark-cropped.png", + "light_logo": "images/globus-300x300-blue.png", + "dark_logo": "images/globus-300x300-blue.png", } diff --git a/docs/configs/bebop.py b/docs/configs/bebop.py index 2045a0f06..74dbf77bf 100644 --- a/docs/configs/bebop.py +++ b/docs/configs/bebop.py @@ -1,10 +1,9 @@ +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.executors import HighThroughputExecutor from parsl.addresses import address_by_interface from parsl.launchers import SrunLauncher from parsl.providers import SlurmProvider -from funcx_endpoint.endpoint.utils.config import Config -from funcx_endpoint.executors import HighThroughputExecutor - # fmt: off # PLEASE UPDATE user_opts BEFORE USE diff --git a/docs/configs/bluewaters.py b/docs/configs/bluewaters.py index eddd6588c..6eb78aae8 100644 --- a/docs/configs/bluewaters.py +++ b/docs/configs/bluewaters.py @@ -1,10 +1,9 @@ +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.executors import HighThroughputExecutor from parsl.addresses import address_by_hostname from parsl.launchers import AprunLauncher from parsl.providers import TorqueProvider -from funcx_endpoint.endpoint.utils.config import Config -from funcx_endpoint.executors import HighThroughputExecutor - # fmt: off # PLEASE UPDATE user_opts BEFORE USE @@ -29,7 +28,7 @@ scheduler_options=user_opts['bluewaters']['scheduler_options'], # Command to be run before starting a worker, such as: - # 'module load bwpy; source activate funcx env'. + # 'module load bwpy; source activate compute env'. 
worker_init=user_opts['bluewaters']['worker_init'], # Scale between 0-1 blocks with 2 nodes per block diff --git a/docs/configs/cooley.py b/docs/configs/cooley.py index dcb78df9a..09246c3de 100644 --- a/docs/configs/cooley.py +++ b/docs/configs/cooley.py @@ -1,16 +1,15 @@ +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.executors import HighThroughputExecutor from parsl.addresses import address_by_interface from parsl.launchers import MpiExecLauncher from parsl.providers import CobaltProvider -from funcx_endpoint.endpoint.utils.config import Config -from funcx_endpoint.executors import HighThroughputExecutor - # fmt: off # PLEASE UPDATE user_opts BEFORE USE user_opts = { 'cooley': { - 'worker_init': 'source ~/setup_funcx_test_env.sh', + 'worker_init': 'source ~/setup_compute_test_env.sh', 'scheduler_options': '', # Specify the account/allocation to which jobs should be charged 'account': '' @@ -32,7 +31,7 @@ scheduler_options=user_opts['cooley']['scheduler_options'], # Command to be run before starting a worker, such as: - # 'module load Anaconda; source activate funcx_env'. + # 'module load Anaconda; source activate compute_env'. worker_init=user_opts['cooley']['worker_init'], # Scale between 0-1 blocks with 2 nodes per block @@ -48,4 +47,4 @@ ], ) -# fmt: onrom funcx_endpoint.endpoint.utils.config import Config +# fmt: on diff --git a/docs/configs/cori.py b/docs/configs/cori.py index b401ef715..671006b68 100644 --- a/docs/configs/cori.py +++ b/docs/configs/cori.py @@ -1,16 +1,15 @@ +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.executors import HighThroughputExecutor from parsl.addresses import address_by_interface from parsl.launchers import SrunLauncher from parsl.providers import SlurmProvider -from funcx_endpoint.endpoint.utils.config import Config -from funcx_endpoint.executors import HighThroughputExecutor - # fmt: off # PLEASE UPDATE user_opts BEFORE USE user_opts = { 'cori': { - 'worker_init': 'source ~/setup_funcx_test_env.sh', + 'worker_init': 'source ~/setup_compute_test_env.sh', 'scheduler_options': '#SBATCH --constraint=knl,quad,cache', } } diff --git a/docs/configs/frontera.py b/docs/configs/frontera.py index f4f69aeb7..b3b5e413f 100644 --- a/docs/configs/frontera.py +++ b/docs/configs/frontera.py @@ -1,16 +1,15 @@ +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.executors import HighThroughputExecutor from parsl.addresses import address_by_interface from parsl.launchers import SrunLauncher from parsl.providers import SlurmProvider -from funcx_endpoint.endpoint.utils.config import Config -from funcx_endpoint.executors import HighThroughputExecutor - # fmt: off # PLEASE UPDATE user_opts BEFORE USE user_opts = { 'frontera': { - 'worker_init': 'source ~/setup_funcx_test_env.sh', + 'worker_init': 'source ~/setup_compute_test_env.sh', 'account': 'EAR22001', 'partition': 'development', 'scheduler_options': '', diff --git a/docs/configs/kube.py b/docs/configs/kube.py index ea123b16b..a05fd7561 100644 --- a/docs/configs/kube.py +++ b/docs/configs/kube.py @@ -1,16 +1,15 @@ +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.executors import HighThroughputExecutor +from globus_compute_endpoint.providers.kubernetes.kube import KubernetesProvider +from globus_compute_endpoint.strategies import KubeSimpleStrategy from parsl.addresses import
address_by_route -from funcx_endpoint.endpoint.utils.config import Config -from funcx_endpoint.executors import HighThroughputExecutor -from funcx_endpoint.providers.kubernetes.kube import KubernetesProvider -from funcx_endpoint.strategies import KubeSimpleStrategy - # fmt: off # PLEASE UPDATE user_opts BEFORE USE user_opts = { 'kube': { - 'worker_init': 'pip install --force-reinstall funcx_endpoint>=0.2.0', + 'worker_init': 'pip install --force-reinstall globus_compute_endpoint>=2.0.1', 'image': 'python:3.8-buster', 'namespace': 'default', } diff --git a/docs/configs/midway.py b/docs/configs/midway.py index e60d140ee..db50fa637 100644 --- a/docs/configs/midway.py +++ b/docs/configs/midway.py @@ -1,16 +1,15 @@ +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.executors import HighThroughputExecutor from parsl.addresses import address_by_hostname from parsl.launchers import SrunLauncher from parsl.providers import SlurmProvider -from funcx_endpoint.endpoint.utils.config import Config -from funcx_endpoint.executors import HighThroughputExecutor - # fmt: off # PLEASE UPDATE user_opts BEFORE USE user_opts = { 'midway': { - 'worker_init': 'source ~/setup_funcx_test_env.sh', + 'worker_init': 'source ~/setup_compute_test_env.sh', 'scheduler_options': '', } } diff --git a/docs/configs/midway_singularity.py b/docs/configs/midway_singularity.py index ca54af0f4..89fcc2227 100644 --- a/docs/configs/midway_singularity.py +++ b/docs/configs/midway_singularity.py @@ -1,16 +1,15 @@ +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.executors import HighThroughputExecutor from parsl.addresses import address_by_hostname from parsl.launchers import SrunLauncher from parsl.providers import SlurmProvider -from funcx_endpoint.endpoint.utils.config import Config -from funcx_endpoint.executors import HighThroughputExecutor - # fmt: off # PLEASE UPDATE user_opts BEFORE USE user_opts = { 'midway': { - 'worker_init': 'source ~/setup_funcx_test_env.sh', + 'worker_init': 'source ~/setup_compute_test_env.sh', 'scheduler_options': '', } } diff --git a/docs/configs/perlmutter.py b/docs/configs/perlmutter.py index d40d93435..d4bdcf803 100644 --- a/docs/configs/perlmutter.py +++ b/docs/configs/perlmutter.py @@ -1,16 +1,15 @@ +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.executors import HighThroughputExecutor from parsl.addresses import address_by_interface from parsl.launchers import SrunLauncher from parsl.providers import SlurmProvider -from funcx_endpoint.endpoint.utils.config import Config -from funcx_endpoint.executors import HighThroughputExecutor - # fmt: off # PLEASE UPDATE user_opts BEFORE USE user_opts = { 'perlmutter': { - 'worker_init': 'source ~/setup_funcx_test_env.sh', + 'worker_init': 'source ~/setup_compute_test_env.sh', 'scheduler_options': '#SBATCH -C gpu' } } diff --git a/docs/configs/polaris.py b/docs/configs/polaris.py index 8c2a76e7e..690b1039d 100644 --- a/docs/configs/polaris.py +++ b/docs/configs/polaris.py @@ -1,11 +1,10 @@ +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.executors import HighThroughputExecutor +from globus_compute_endpoint.strategies import SimpleStrategy from parsl.addresses import address_by_interface from parsl.launchers import SingleNodeLauncher from parsl.providers import PBSProProvider -from funcx_endpoint.endpoint.utils.config import Config -from funcx_endpoint.executors import 
HighThroughputExecutor -from funcx_endpoint.strategies import SimpleStrategy - # fmt: off # PLEASE UPDATE user_opts BEFORE USE diff --git a/docs/configs/theta.py b/docs/configs/theta.py index c4aa21f6a..d0d0bb215 100644 --- a/docs/configs/theta.py +++ b/docs/configs/theta.py @@ -1,16 +1,15 @@ +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.executors import HighThroughputExecutor from parsl.addresses import address_by_interface from parsl.launchers import AprunLauncher from parsl.providers import CobaltProvider -from funcx_endpoint.endpoint.utils.config import Config -from funcx_endpoint.executors import HighThroughputExecutor - # fmt: off # PLEASE UPDATE user_opts BEFORE USE user_opts = { 'theta': { - 'worker_init': 'source ~/setup_funcx_test_env.sh', + 'worker_init': 'source ~/setup_compute_test_env.sh', 'scheduler_options': '', # Specify the account/allocation to which jobs should be charged 'account': '' @@ -33,7 +32,7 @@ scheduler_options=user_opts['theta']['scheduler_options'], # Command to be run before starting a worker, such as: - # 'module load Anaconda; source activate funcx_env'. + # 'module load Anaconda; source activate compute_env'. worker_init=user_opts['theta']['worker_init'], # Scale between 0-1 blocks with 2 nodes per block diff --git a/docs/configs/theta_singularity.py b/docs/configs/theta_singularity.py index 3cea585b5..584fe185e 100644 --- a/docs/configs/theta_singularity.py +++ b/docs/configs/theta_singularity.py @@ -1,16 +1,15 @@ +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.executors import HighThroughputExecutor from parsl.addresses import address_by_interface from parsl.launchers import AprunLauncher from parsl.providers import CobaltProvider -from funcx_endpoint.endpoint.utils.config import Config -from funcx_endpoint.executors import HighThroughputExecutor - # fmt: off # PLEASE UPDATE user_opts BEFORE USE user_opts = { 'theta': { - 'worker_init': 'source ~/setup_funcx_test_env.sh', + 'worker_init': 'source ~/setup_compute_test_env.sh', 'scheduler_options': '', # Specify the account/allocation to which jobs should be charged 'account': '' @@ -37,7 +36,7 @@ scheduler_options=user_opts['theta']['scheduler_options'], # Command to be run before starting a worker, such as: - # 'module load Anaconda; source activate funcx_env'. + # 'module load Anaconda; source activate compute_env'. 
worker_init=user_opts['theta']['worker_init'], # Scale between 0-1 blocks with 2 nodes per block diff --git a/docs/configs/uchicago_ai_cluster.py b/docs/configs/uchicago_ai_cluster.py index ee6eefc36..79a767007 100644 --- a/docs/configs/uchicago_ai_cluster.py +++ b/docs/configs/uchicago_ai_cluster.py @@ -1,10 +1,9 @@ +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.executors import HighThroughputExecutor from parsl.addresses import address_by_interface from parsl.launchers import SrunLauncher from parsl.providers import SlurmProvider -from funcx_endpoint.endpoint.utils.config import Config -from funcx_endpoint.executors import HighThroughputExecutor - # fmt: off diff --git a/docs/configs/worker_pinning.py b/docs/configs/worker_pinning.py index 1823a56b2..f074ad39a 100644 --- a/docs/configs/worker_pinning.py +++ b/docs/configs/worker_pinning.py @@ -1,10 +1,9 @@ # fmt: off +from globus_compute_endpoint.endpoint.utils.config import Config +from globus_compute_endpoint.executors import HighThroughputExecutor from parsl.providers import LocalProvider -from funcx_endpoint.endpoint.utils.config import Config -from funcx_endpoint.executors import HighThroughputExecutor - config = Config( executors=[ HighThroughputExecutor( diff --git a/docs/configuring.rst b/docs/configuring.rst index 3fdb817e5..1cb2caadf 100644 --- a/docs/configuring.rst +++ b/docs/configuring.rst @@ -1,8 +1,8 @@ .. _configuration-section: -funcX has been used on various systems around the world. Below are example configurations +Globus Compute has been used on various systems around the world. Below are example configurations for commonly used systems. If you would like to add your system to this list please -contact the funcX team via Slack. +contact the Globus Compute Team via Slack. .. note:: All configuration examples below must be customized for the user's @@ -92,7 +92,7 @@ using the ``CobaltProvider``. This configuration assumes that the script is bein Polaris (ALCF) ^^^^^^^^^^^^^^ -.. image:: images/ALCF_Polaris.jpeg +.. image:: _static/images/ALCF_Polaris.jpeg The following snippet shows an example configuration for executing on Argonne Leadership Computing Facility's **Polaris** cluster. This example uses the ``HighThroughputExecutor`` and connects to Polaris's PBS scheduler @@ -120,7 +120,7 @@ Perlmutter (NERSC) The following snippet shows an example configuration for accessing NERSC's **Perlmutter** supercomputer. This example uses the ``HighThroughputExecutor`` and connects to Perlmutters's Slurm scheduler. It is configured to request 2 nodes configured with 1 TaskBlock per node. Finally, it includes override information to request a particular node type (Haswell) and to configure a specific Python environment on the worker nodes using Anaconda. -.. note:: Please run ``module load cgpu`` prior to executing ``funcx-endpoint start `` +.. note:: Please run ``module load cgpu`` prior to executing ``globus-compute-endpoint start `` on the Cori login nodes to access the Perlmutter queues. .. literalinclude:: configs/perlmutter.py @@ -152,9 +152,9 @@ Pinning Workers to devices ^^^^^^^^^^^^^^^^^^^^^^^^^^ Many modern clusters provide multiple accelerators per compute note, yet many applications are best suited to using a -single accelerator per task. funcX supports pinning each worker to different accelerators using the ``available_accelerators`` -option of the ``HighThroughputExecutor``. 
Provide either the number of accelerators (funcX will assume they are named -in integers starting from zero) or a list of the names of the accelerators available on the node. Each funcX worker +single accelerator per task. Globus Compute supports pinning each worker to different accelerators using the ``available_accelerators`` +option of the ``HighThroughputExecutor``. Provide either the number of accelerators (Globus Compute will assume they are named +with integers starting from zero) or a list of the names of the accelerators available on the node. Each Globus Compute worker will have the following environment variables set to the worker specific identity assigned: ``CUDA_VISIBLE_DEVICES``, ``ROCR_VISIBLE_DEVICES``, ``SYCL_DEVICE_FILTER``. diff --git a/docs/endpoints.rst b/docs/endpoints.rst index 360040360..fd6db99fd 100644 --- a/docs/endpoints.rst +++ b/docs/endpoints.rst @@ -2,71 +2,71 @@ Endpoints ========= An endpoint is a persistent service launched by the user on a compute system to serve as a conduit for -executing functions on that computer. funcX supports a range of target systems, enabling +executing functions on that computer. Globus Compute supports a range of target systems, enabling an endpoint to be deployed on a laptop, the login node of a campus cluster, a cloud instance, or a Kubernetes cluster, for example. The endpoint requires outbound network connectivity. That is, it must be able to connect to -funcX at `funcx.org `_. +Globus Compute at `funcx.org `_. -The funcX endpoint is available on pypi.org (and thus available via ``pip``). -However, *we strongly recommend installing the funcX endpoint into an isolated virtual environment*. +The Globus Compute endpoint is available on pypi.org (and thus available via ``pip``). +However, *we strongly recommend installing the Globus Compute endpoint into an isolated virtual environment*. `Pipx `_ automatically manages -package-specific virtual environments for command line applications, so install funcX endpoint via:: +package-specific virtual environments for command line applications, so install the Globus Compute endpoint via:: - $ python3 -m pipx install funcx_endpoint + $ python3 -m pipx install globus-compute-endpoint .. note:: - Please note that the funcX endpoint is only supported on Linux. + Please note that the Globus Compute endpoint is only supported on Linux. -After installing the funcX endpoint, use the ``funcx-endpoint`` command to manage existing endpoints. +After installing the Globus Compute endpoint, use the ``globus-compute-endpoint`` command to manage existing endpoints. First time setup ---------------- -You will be required to authenticate the first time you run ``funcx-endpoint``. +You will be required to authenticate the first time you run ``globus-compute-endpoint``. If you have authenticated previously, the endpoint will cache access tokens in the local configuration file. -funcX requires authentication in order to associate +Globus Compute requires authentication in order to associate endpoints with users and ensure only authorized users can run tasks on that endpoint. As part of this step, we request access to your identity and Globus Groups. To get started, you will first want to configure a new endpoint. :: - $ funcx-endpoint configure + $ globus-compute-endpoint configure -Once you've run this command, a directory will be created at ``$HOME/.funcx`` and a set of default configuration files will be generated.
+Once you've run this command, a directory will be created at ``$HOME/.globus_compute`` and a set of default configuration files will be generated. -You can also set up auto-completion for the ``funcx-endpoint`` commands in your shell, by using the command :: +You can also set up auto-completion for the ``globus-compute-endpoint`` commands in your shell, by using the command :: - $ funcx-endpoint --install-completion [zsh bash fish ...] + $ globus-compute-endpoint --install-completion [zsh bash fish ...] Configuring an Endpoint ----------------------- -funcX endpoints act as gateways to diverse computational resources, including clusters, clouds, +Globus Compute endpoints act as gateways to diverse computational resources, including clusters, clouds, supercomputers, and even your laptop. To make the best use of your resources, the endpoint must be configured to match the capabilities of the resource on which it is deployed. -funcX provides a Python class-based configuration model that allows you to specify the shape of the +Globus Compute provides a Python class-based configuration model that allows you to specify the shape of the resources (number of nodes, number of cores per worker, walltime, etc.) as well as allowing you to place -limits on how funcX may scale the resources in response to changing workload demands. +limits on how Globus Compute may scale the resources in response to changing workload demands. To generate the appropriate directories and default configuration template, run the following command:: - $ funcx-endpoint configure + $ globus-compute-endpoint configure -This command will create a profile for your endpoint in ``$HOME/.funcx//`` and will instantiate a +This command will create a profile for your endpoint in ``$HOME/.globus_compute//`` and will instantiate a ``config.py`` file. This file should be updated with the appropriate configurations for the computational system you are targeting before you start the endpoint. -funcX is configured using a :class:`~funcx_endpoint.endpoint.utils.config.Config` object. -funcX uses `Parsl `_ to manage resources. For more information, -see the :class:`~funcx_endpoint.endpoint.utils.config.Config` class documentation and the +Globus Compute is configured using a :class:`~globus_compute_endpoint.endpoint.utils.config.Config` object. +Globus Compute uses `Parsl `_ to manage resources. For more information, +see the :class:`~globus_compute_endpoint.endpoint.utils.config.Config` class documentation and the `Parsl documentation `_ . .. note:: If the ENDPOINT_NAME is not specified, a default endpoint named "default" is configured.
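As a concrete reference, a minimal ``config.py`` for purely local execution might look like the sketch below, combining the ``Config`` and ``HighThroughputExecutor`` classes with Parsl's ``LocalProvider`` (the block counts are illustrative, not prescribed defaults):

.. code-block:: python

    from globus_compute_endpoint.endpoint.utils.config import Config
    from globus_compute_endpoint.executors import HighThroughputExecutor
    from parsl.providers import LocalProvider

    # run workers as local processes on the machine hosting the endpoint
    config = Config(
        executors=[
            HighThroughputExecutor(
                provider=LocalProvider(
                    init_blocks=1,
                    min_blocks=0,
                    max_blocks=1,
                ),
            )
        ],
    )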
@@ -77,14 +77,14 @@ Starting an Endpoint To start a new endpoint run the following command:: - $ funcx-endpoint start + $ globus-compute-endpoint start .. note:: If the ENDPOINT_NAME is not specified, a default endpoint named "default" is started. -Starting an endpoint will perform a registration process with funcX. -The registration process provides funcX with information regarding the endpoint. +Starting an endpoint will perform a registration process with Globus Compute. +The registration process provides Globus Compute with information regarding the endpoint. The endpoint also establishes an outbound connection to RabbitMQ to retrieve tasks, send results, and communicate command information. -Thus, the funcX endpoint requires outbound access to the funcX services over HTTPS (port 443) and AMQPS (port 5671). +Thus, the Globus Compute endpoint requires outbound access to the Globus Compute services over HTTPS (port 443) and AMQPS (port 5671). Once started, the endpoint uses a daemon process to run in the background. @@ -97,7 +97,7 @@ Once started, the endpoint uses a daemon process to run in the background. To start an endpoint using a client identity, rather than as a user, you can export the FUNCX_SDK_CLIENT_ID and FUNCX_SDK_CLIENT_SECRET -environment variables. This is explained in detail in :ref:`client credentials with funcxclients`. +environment variables. This is explained in detail in :ref:`client credentials with globus compute clients`. Stopping an Endpoint @@ -105,7 +105,7 @@ To stop an endpoint, run the following command:: - $ funcx-endpoint stop + $ globus-compute-endpoint stop If the endpoint is not running and was stopped correctly previously, this command does nothing. @@ -115,14 +115,14 @@ can be started cleanly again. .. note:: If the ENDPOINT_NAME is not specified, the default endpoint is stopped. -.. warning:: Run the ``funcx-endpoint stop`` command **twice** to ensure that the endpoint is shutdown. +.. warning:: Run the ``globus-compute-endpoint stop`` command **twice** to ensure that the endpoint is shut down. Listing Endpoints ----------------- To list available endpoints on the current system, run:: - $ funcx-endpoint list + $ globus-compute-endpoint list +---------------+-------------+--------------------------------------+ | Endpoint Name | Status | Endpoint ID | +===============+=============+======================================+ @@ -136,7 +136,7 @@ To list available endpoints on the current system, run:: Endpoints can be the following states: * **Initialized**: The endpoint has been created, but not started - following configuration and is not registered with the `funcx service`. + following configuration and is not registered with the `Globus Compute service`. * **Running**: The endpoint is active and available for executing functions. * **Stopped**: The endpoint was stopped by the user. It is not running and therefore, cannot service any functions. It can be started again without issues. @@ -148,24 +148,24 @@ Endpoints can be the following states: Container behaviors and routing ------------------------------- -The funcX endpoint can run functions using independent Python processes or optionally -inside containers. funcX supports various container technologies (e.g., docker and singularity) +The Globus Compute endpoint can run functions using independent Python processes or optionally -inside containers. Globus Compute supports various container technologies (e.g., docker and singularity) and different routing mechanisms for different use cases. Raw worker processes (``worker_mode=no_container``): -* Hard routing: All worker processes are of the same type "RAW". It this case, the funcx endpoint simply routes tasks to any available worker processes. This is the default mode of a funcx endpoint. +* Hard routing: All worker processes are of the same type "RAW". In this case, the Globus Compute endpoint simply routes tasks to any available worker processes. This is the default mode of a Globus Compute endpoint. * Soft routing: It is the same as hard routing. Kubernetes (docker): -* Hard routing: Both the manager and the worker are deployed within a pod and thus the manager cannot change the type of worker container.
In this case, a set of managers are deployed with specific container images and the funcx endpoint simply routes tasks to corresponding managers (matching their types). +* Hard routing: Both the manager and the worker are deployed within a pod and thus the manager cannot change the type of worker container. In this case, a set of managers are deployed with specific container images and the Globus Compute endpoint simply routes tasks to corresponding managers (matching their types). * Soft routing: NOT SUPPORTED. Native container support (docker, singularity, shifter): * Hard routing: In this case, each manager (on a compute node) can only launch worker containers of a specific type and thus each manager can serve only one type of function. -* Soft routing: When receiving a task for a specific container type, the funcx endpoint attempts to send the task to a manager that has a suitable warm container to minimize the total number of container cold starts. If there are not any warmed containers in any connected managers, the funcX endpoint chooses one manager randomly to dispatch the task. +* Soft routing: When receiving a task for a specific container type, the Globus Compute endpoint attempts to send the task to a manager that has a suitable warm container to minimize the total number of container cold starts. If no connected manager has a warm container, the Globus Compute endpoint chooses one manager randomly to dispatch the task. Example configurations diff --git a/docs/executor.rst b/docs/executor.rst index a86c27ff6..925fa9321 100644 --- a/docs/executor.rst +++ b/docs/executor.rst @@ -1,36 +1,36 @@ -funcX Executor -============== +Globus Compute Executor +======================= -The |FuncXExecutor|_ class, a subclass of Python's |Executor|_, is the -preferred approach to collecting results from the funcX web services. Over +The |Executor|_ class, a subclass of Python's `concurrent.futures.Executor <https://docs.python.org/3/library/concurrent.futures.html#executor-objects>`_, is the +preferred approach to collecting results from the Globus Compute web services. Over polling (the historical approach) where the web service must be repeatedly queried for the status of tasks and results eventually collected in bulk, the -|FuncXExecutor|_ class instantiates an AMQPS connection that streams results +|Executor|_ class instantiates an AMQPS connection that streams results directly -- and immediately -- as they arrive at the server. This is a far more efficient paradigm, simultaneously in terms of bytes over the wire, time spent waiting for results, and boilerplate code to check for results. -For most "simple" interactions with funcX, this class will likely be the +For most "simple" interactions with Globus Compute, this class will likely be the quickest and easiest avenue to submit tasks and acquire results. An example interaction: .. code-block:: python - :caption: funcxexecutor_basic_example.py + :caption: globus_compute_executor_basic_example.py - from funcx import FuncXExecutor + from globus_compute_sdk import Executor def double(x): return x * 2 tutorial_endpoint_id = '4b116d3c-1703-4f8f-9f6f-39921e5864df' - with FuncXExecutor(endpoint_id=tutorial_endpoint_id) as fxe: - fut = fxe.submit(double, 7) + with Executor(endpoint_id=tutorial_endpoint_id) as gce: + fut = gce.submit(double, 7) print(fut.result()) This example is only a quick-reference, showing the basic mechanics of how to -use the |FuncXExecutor|_ class and submitting a single task. However, there -are a number of details to observe. The first is that a |FuncXExecutor|_ +use the |Executor|_ class and submit a single task.
However, there +are a number of details to observe. The first is that an |Executor|_ instance is associated with a specific endpoint. We use the "well-known" tutorial endpoint in this example, but that can point to any endpoint to which you have access. @@ -40,19 +40,19 @@ you have access. (authenticated) user. You are welcome to use it, but please limit the size and number of functions you send to this endpoint as it is a shared resource - that is (intentionally) not very powerful. It's primary intended - purpose is for an introduction to the funcX toolset. + that is (intentionally) not very powerful. Its primary intended + purpose is for an introduction to the Globus Compute toolset. Second, the waiting -- or "blocking" -- for a result is automatic. The |.submit()|_ call returns a |Future|_ immediately; the actual HTTP call to the -funcX web-services will not have occurred yet, and neither will the task even +Globus Compute web services will not have occurred yet, and neither will the task even have been executed (remotely), much less a result received. The |.result()|_ call blocks ("waits") until all of that has completed, and the result has been received from the upstream services. -Third, |FuncXExecutor|_ objects can be used as context managers (the ``with`` -statement). Underneath the hood, the |FuncXExecutor|_ class uses threads to +Third, |Executor|_ objects can be used as context managers (the ``with`` +statement). Underneath the hood, the |Executor|_ class uses threads to implement the asynchronous interface -- a thread to coalesce and submit tasks, -and a thread to watch for incoming results. The |FuncXExecutor|_ logic cannot +and a thread to watch for incoming results. The |Executor|_ logic cannot determine when it will no longer receive tasks (i.e., no more |.submit()|_ calls) and so cannot prematurely shutdown. Thus, it must be told, either explicitly with a call to |.shutdown()|_, or implicitly when used as a context @@ -74,9 +74,9 @@ always end with 1. The rules are: To verify all of the sequences through 100, one brute-force approach is: .. code-block:: python - :caption: funcxexecutor_collatz.py + :caption: globus_compute_executor_collatz.py - from funcx import FuncXExecutor + from globus_compute_sdk import Executor def generate_collatz_sequence(N: int, sequence_limit = 10_000): seq = [N] @@ -93,9 +93,9 @@ To verify all of the sequences through 100, one brute-force approach is: generate_from = 1 generate_through = 100 futs, results, disproof_candidates = [], [], [] - with FuncXExecutor(endpoint_id=ep_id) as fxe: + with Executor(endpoint_id=ep_id) as gce: for n in range(generate_from, generate_through + 1): - futs.append(fxe.submit(generate_collatz_sequence, n)) + futs.append(gce.submit(generate_collatz_sequence, n)) print("Tasks all submitted; waiting for results") # The futures were appended to the `futs` list in order, so one could wait @@ -123,7 +123,7 @@ status. Futures make this simple with the |.done()|_ method: .. code-block:: python ... - future = fxe.submit(generate_collatz_sequence, 1234567890) + future = gce.submit(generate_collatz_sequence, 1234567890) # Use the .done() method to check the status of the function without # blocking; this will return a Bool indicating whether the result is ready @@ -142,9 +142,9 @@ than summarily return, and the specific number chosen starts a sequence that takes more than 100 steps to complete. .. 
code-block:: python - :caption: funcxexecutor_handle_result_exceptions.py + :caption: globus_compute_executor_handle_result_exceptions.py - from funcx import FuncXExecutor + from globus_compute_sdk import Executor def generate_collatz_sequence(N: int, sequence_limit=100): seq = [N] @@ -158,8 +158,8 @@ takes more than 100 steps to complete. raise ValueError(f"Sequence not terminated in {sequence_limit} steps") return seq - with FuncXExecutor(endpoint_id=ep_id) as fxe: - future = fxe.submit(generate_collatz_sequence, 1234567890) + with Executor(endpoint_id=ep_id) as gce: + future = gce.submit(generate_collatz_sequence, 1234567890) try: print(future.result()) @@ -180,7 +180,7 @@ finish.) There are a number of ways to work with results as they arrive; this example uses `concurrent.futures.as_completed`_: .. code-block:: python - :caption: funcxexecutor_results_as_arrived.py + :caption: globus_compute_executor_results_as_arrived.py import concurrent.futures @@ -192,8 +192,8 @@ example uses `concurrent.futures.as_completed`_: time.sleep(x * random.random()) return f"{x} -> {x * 2}" - with FuncXExecutor(endpoint_id=endpoint_id) as fxe: - futs = [fxe.submit(double, i) for i in range(10)] + with Executor(endpoint_id=endpoint_id) as gce: + futs = [gce.submit(double, i) for i in range(10)] # The futures were appended to the `futs` list in order, so one could # wait for each result in turn to get an ordered set: @@ -215,56 +215,56 @@ example uses `concurrent.futures.as_completed`_: Reloading Tasks --------------- -Waiting for incoming results with the |FuncXExecutor|_ requires an active +Waiting for incoming results with the |Executor|_ requires an active connection -- which is often at odds with closing a laptop clamshell (e.g., heading home for the weekend). For longer running jobs like this, the -|FuncXExecutor|_ offers the |.reload_tasks()|_ method. This method will reach -out to the funcX web-services to collect all of the tasks associated with the +|Executor|_ offers the |.reload_tasks()|_ method. This method will reach +out to the Globus Compute web-services to collect all of the tasks associated with the |.task_group_id|_, create a list of associated futures, finish (call |.set_result()|_) any previously finished tasks, and watch the unfinished futures. Consider the following (contrived) example: .. code-block:: python - :caption: funcxexecutor_reload_tasks.py + :caption: globus_compute_executor_reload_tasks.py # execute initially as: - # $ python funcxexecutor_reload_tasks.py + # $ python globus_compute_executor_reload_tasks.py # ... this Task Group ID: # ... 
# Then run with the Task Group ID as an argument: - # $ python funcxexecutor_reload_tasks.py + # $ python globus_compute_executor_reload_tasks.py import os, signal, sys, time, typing as t - from funcx import FuncXExecutor - from funcx.sdk.executor import FuncXFuture + from globus_compute_sdk import Executor + from globus_compute_sdk.sdk.executor import ComputeFuture task_group_id = sys.argv[1] if len(sys.argv) > 1 else None def task_kernel(num): - return f"your funcx logic result, from task: {num}" + return f"your Globus Compute logic result, from task: {num}" ep_id = "" - with FuncXExecutor(endpoint_id=ep_id) as fxe: - futures: t.Iterable[FuncXFuture] + with Executor(endpoint_id=ep_id) as gce: + futures: t.Iterable[ComputeFuture] if task_group_id: print(f"Reloading tasks from Task Group ID: {task_group_id}") - fxe.task_group_id = task_group_id - futures = fxe.reload_tasks() + gce.task_group_id = task_group_id + futures = gce.reload_tasks() else: # Save the task_group_id somewhere. Perhaps in a file, or less # robustly "as mere text" on your console: print( - "New session; creating funcX tasks; if this script dies, rehydrate" - f" futures with this Task Group ID: {fxe.task_group_id}" + "New session; creating Globus Compute tasks; if this script dies, rehydrate" + f" futures with this Task Group ID: {gce.task_group_id}" ) num_tasks = 5 - futures = [fxe.submit(task_kernel, i + 1) for i in range(num_tasks)] + futures = [gce.submit(task_kernel, i + 1) for i in range(num_tasks)] # Ensure all tasks have been sent upstream ... - while fxe.task_count_submitted < num_tasks: + while gce.task_count_submitted < num_tasks: time.sleep(1) - print(f"Tasks submitted upstream: {fxe.task_count_submitted}") + print(f"Tasks submitted upstream: {gce.task_count_submitted}") # ... before script death for [silly reason; did you lose power!?] bname = sys.argv[0] @@ -273,7 +273,7 @@ futures. Consider the following (contrived) example: print("Simulating unexpected process death! Now reload the session") print("by rerunning this script with the task_group_id:\n") - print(f" {bname} {fxe.task_group_id}\n") + print(f" {bname} {gce.task_group_id}\n") os.kill(os.getpid(), signal.SIGKILL) exit(1) # In case KILL takes split-second to process @@ -287,13 +287,13 @@ futures. Consider the following (contrived) example: print("Results:\n ", "\n ".join(results)) For a slightly more advanced usage, one could manually submit a batch of tasks -with the |FuncXClient|_, and wait for the results at a future time. Submitting +with the |Client|_, and wait for the results at a future time. Submitting the results might look like: .. code-block:: python - :caption: funcxclient_submit_batch.py + :caption: globus_compute_client_submit_batch.py - from funcx import FuncXClient + from globus_compute_sdk import Client def expensive_task(task_arg): import time @@ -301,43 +301,43 @@ the results might look like: return "All done!" ep_id = "" - fxc = FuncXClient() + gcc = Client() - print(f"Task Group ID for later reloading: {fxc.session_task_group_id}") - fn_id = fxc.register_function(expensive_task) - batch = fxc.create_batch() + print(f"Task Group ID for later reloading: {gcc.session_task_group_id}") + fn_id = gcc.register_function(expensive_task) + batch = gcc.create_batch() for task_i in range(10): batch.add(fn_id, ep_id, args=(task_i,)) - self.funcx_client.batch_run(batch) + gcc.batch_run(batch) And ~24 hours later, could reload the tasks with the executor to continue processing: .. 
code-block:: python - :caption: funcxexecutor_reload_batch.py + :caption: globus_compute_executor_reload_batch.py - from funcx import FuncXExecutor + import concurrent.futures + from globus_compute_sdk import Executor ep_id = "" tg_id = "Saved task group id from 'yesterday'" - with FuncxExecutor(endpoint_id=ep_id, task_group_id=tg_id) as fxe: - futures = fxe.reload_tasks()) + with Executor(endpoint_id=ep_id, task_group_id=tg_id) as gce: + futures = gce.reload_tasks() - for f in concurrent.futures.as_completed(futs): + for f in concurrent.futures.as_completed(futures): print("Received:", f.result()) -.. |FuncXClient| replace:: ``FuncXClient`` -.. _FuncXClient: reference/client.html -.. |FuncXExecutor| replace:: ``FuncXExecutor`` -.. _FuncXExecutor: reference/executor.html +.. |Client| replace:: ``Client`` +.. _Client: reference/client.html +.. |Executor| replace:: ``Executor`` +.. _Executor: reference/executor.html .. |Future| replace:: ``Future`` .. _Future: https://docs.python.org/3/library/concurrent.futures.html#future-objects -.. |Executor| replace:: ``Executor`` -.. _Executor: https://docs.python.org/3/library/concurrent.futures.html#executor-objects .. |.shutdown()| replace:: ``.shutdown()`` -.. _.shutdown(): reference/executor.html#funcx.FuncXExecutor.shutdown +.. _.shutdown(): reference/executor.html#globus_compute_sdk.Executor.shutdown .. |.submit()| replace:: ``.submit()`` -.. _.submit(): reference/executor.html#funcx.FuncXExecutor.submit +.. _.submit(): reference/executor.html#globus_compute_sdk.Executor.submit .. |.result()| replace:: ``.result()`` .. _.result(): https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Future.result .. |.done()| replace:: ``.done()`` @@ -345,8 +345,8 @@ processing: .. |.set_result()| replace:: ``.set_result()`` .. _.set_result(): https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Future.set_result .. |.reload_tasks()| replace:: ``.reload_tasks()`` -.. _.reload_tasks(): reference/executor.html#funcx.FuncXExecutor.reload_tasks +.. _.reload_tasks(): reference/executor.html#globus_compute_sdk.Executor.reload_tasks .. |.task_group_id| replace:: ``.task_group_id`` -.. _.task_group_id: reference/executor.html#funcx.FuncXExecutor.task_group_id +.. _.task_group_id: reference/executor.html#globus_compute_sdk.Executor.task_group_id .. _Collatz conjecture: https://en.wikipedia.org/wiki/Collatz_conjecture .. _concurrent.futures.as_completed: https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.as_completed diff --git a/docs/funcx_upgrade.rst b/docs/funcx_upgrade.rst new file mode 100644 index 000000000..86bdbe4b7 --- /dev/null +++ b/docs/funcx_upgrade.rst @@ -0,0 +1,47 @@ +########################################### +Upgrading from funcX SDK and funcx-endpoint +########################################### + +Note +^^^^ +This document will be expanded at a later date to include more details. + +Background +^^^^^^^^^^ + +The Globus team is renaming funcX to Globus Compute in order to centralize our +infrastructure under a single umbrella. + +funcX SDK +^^^^^^^^^ + +The `funcx` PyPI package was formerly the funcX SDK. This is now named the `Globus +Compute SDK` and is `available on PyPI `_ +under the same name. + +If you currently have funcX installed, we recommend these steps to upgrade to +Globus Compute: + + | $ pip uninstall funcx + | $ mv ~/.funcx ~/.globus_compute # Optional + | $ Install Globus Compute SDK in its own venv `as detailed here `__ + +The `funcx` package is still available on PyPI but will merely be a wrapper +around the Globus Compute SDK. The wrapper will only be available for
a limited time, to give users an easier migration path. funcX Endpoint ^^^^^^^^^^^^^^ `funcx-endpoint` on PyPI was the former funcX endpoint package. This is now called the `Globus Compute Endpoint` and is `available on PyPI `_. | $ pip uninstall funcx-endpoint | $ mv ~/.funcx ~/.globus_compute # Optional | $ Install Globus Compute Endpoint `using pipx `__ The `funcx-endpoint` package is still available on PyPI but will merely be a wrapper around Globus Compute Endpoint. The wrapper will only be available for a limited time, to give users an easier migration path. diff --git a/docs/index.rst b/docs/index.rst index 5d0fccd5b..2aaf4daed 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,64 +1,64 @@ -.. funcX documentation master file, created by +.. Globus Compute documentation master file, created by sphinx-quickstart on Mon Jul 1 11:43:54 2019. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. -funcX - Federated Function as a Service -######################################### +Globus Compute - Federated Function as a Service +################################################ -funcX is a distributed Function as a Service (FaaS) platform +Globus Compute is a distributed Function as a Service (FaaS) platform that enables flexible, scalable, and high performance remote function execution. -Unlike centralized FaaS platforms, funcX allows users to execute +Unlike centralized FaaS platforms, Globus Compute allows users to execute functions on heterogeneous remote computers, from laptops to campus clusters, clouds, and supercomputers. -funcX is composed of two core components: +Globus Compute is composed of two core components: -* The funcX cloud-hosted service provides an available, reliable, and secure interface for registering, sharing, and executing functions on remote endpoints. It implements a fire-and-forget model via which the cloud service is responsible for securely communicating with endpoints to ensure functions are successfully executed. +* The Globus Compute cloud-hosted service provides an available, reliable, and secure interface for registering, sharing, and executing functions on remote endpoints. It implements a fire-and-forget model via which the cloud service is responsible for securely communicating with endpoints to ensure functions are successfully executed. -* funcX endpoints transform existing laptops, clouds, clusters, and supercomputers into function serving systems. Endpoints are registered by installing the funcX_endpoint software and configuring it for the target system. +* Globus Compute endpoints transform existing laptops, clouds, clusters, and supercomputers into function serving systems. Endpoints are registered by installing the Globus Compute endpoint software and configuring it for the target system. -funcX model -^^^^^^^^^^^^^^^^^^ +Globus Compute model +^^^^^^^^^^^^^^^^^^^^ -funcX works like other FaaS platforms: users first register a function with -funcX by specifying the function body (in Python), they may then execute that +Globus Compute works like other FaaS platforms: users first register a function with +Globus Compute by specifying the function body (in Python); they may then execute that function by specifying the function ID and input arguments. Unlike traditional FaaS platforms, users also specify the endpoint ID on which they wish to execute the function.
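In SDK terms, that two-step model looks roughly like the sketch below; the endpoint UUID is a placeholder for one you have access to:

.. code-block:: python

    from globus_compute_sdk import Client

    def greet(name):
        return f"Hello, {name}!"

    gcc = Client()
    fn_id = gcc.register_function(greet)   # register once, by function body
    # execute anywhere: the endpoint is chosen at invocation time
    task_id = gcc.run("World", endpoint_id="<endpoint-uuid>", function_id=fn_id)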
-funcX endpoints are user-managed and may be configured on a wide range of resources -from laptops and scientific instruments through to supercomputers. The funcX endpoint +Globus Compute endpoints are user-managed and may be configured on a wide range of resources +from laptops and scientific instruments through to supercomputers. The Globus Compute endpoint can be configured to execute functions locally (i.e., using multiple processes) or on connected computing resources (i.e., by provisioning and managing compute nodes from a batch scheduler or cloud API). -funcX implements a reliable fire-and-forget execution model. After invoking a function, -a user can close their laptop and rely on funcX to manage the execution and store the -results. funcX securely communicates with remote endpoints, waits for resources -to become available, and can even retry execution upon failure. funcX stores results (or +Globus Compute implements a reliable fire-and-forget execution model. After invoking a function, +a user can close their laptop and rely on Globus Compute to manage the execution and store the +results. Globus Compute securely communicates with remote endpoints, waits for resources +to become available, and can even retry execution upon failure. Globus Compute stores results (or errors) in the cloud-hosted service until they are retrieved by the user. -.. image:: _static/images/funcX-model.png +.. image:: _static/images/compute-model.png -Using funcX -^^^^^^^^^^^^^^^^^^ +Using Globus Compute +^^^^^^^^^^^^^^^^^^^^ -funcX offers a Python SDK for registering, sharing, and executing functions. -The following code block examples how funcX can be used to execute a "hello +Globus Compute offers a Python SDK for registering, sharing, and executing functions. +The following code block shows how Globus Compute can be used to execute a "hello world" function on a remote endpoint. .. code-block:: python - from funcx import FuncXExecutor + from globus_compute_sdk import Executor def hello_world(): return "Hello World!" tutorial_endpoint_id = '4b116d3c-1703-4f8f-9f6f-39921e5864df' - with FuncXExecutor(endpoint_id=tutorial_endpoint_id) as fxe: - future = fxe.submit(hello_world) + with Executor(endpoint_id=tutorial_endpoint_id) as gce: + future = gce.submit(hello_world) print(future.result()) @@ -66,15 +66,15 @@ world" function on a remote endpoint. Deploying an endpoint ^^^^^^^^^^^^^^^^^^^^^^^ -A funcX endpoint can be created by installing the funcX endpoint software +A Globus Compute endpoint can be created by installing the Globus Compute endpoint software and configuring it for the target resources. The following steps show how to download and configure an endpoint for local (multi-process) execution. :: - $ python3 -m pipx install funcx_endpoint + $ python3 -m pipx install globus-compute-endpoint - $ funcx-endpoint configure + $ globus-compute-endpoint configure - $ funcx-endpoint start + $ globus-compute-endpoint start diff --git a/docs/limits.rst b/docs/limits.rst index bbac69c98..dcfcacc94 100644 --- a/docs/limits.rst +++ b/docs/limits.rst @@ -1,15 +1,15 @@ Limits ------ -This section describes the limits placed by funcX to ensure high availability and reliability to +This section describes the limits placed by Globus Compute to ensure high availability and reliability to all users sharing this hosted service. Routing high volumes of tasks and results between users and endpoints is resource-intensive from a computation, memory, and network transport perspective. -To guarantee truly fire and forget tasks and fairness for all users funcX applies the limitations +To guarantee truly fire-and-forget tasks and fairness, Globus Compute applies the limitations described below uniformly to all users. -There are three mechanisms by which funcX controls the flow of tasks through the system: +There are three mechanisms by which Globus Compute controls the flow of tasks through the system: -* Task-Rate limiting: Controls the rate at which you can submit new tasks to funcX +* Task-Rate limiting: Controls the rate at which you can submit new tasks to Globus Compute * Data limits: Restricts the data volume for task inputs and outputs * Task TTL: Sets the maximum task lifetime after which stale tasks are abandoned @@ -17,9 +17,9 @@ Task-Rate Limiting ^^^^^^^^^^^^^^^^^^ -funcX uses Task-Rate limiting to ensure the quality of service to all users on the funcX platform. +Globus Compute uses Task-Rate limiting to ensure the quality of service to all users on the Globus Compute platform. These limits ensure that users might not accidentally overload the service with requests and overload -the system for everyone else. Currently, funcX limits the number of requests submitted from a client to +the system for everyone else. Currently, Globus Compute limits the number of requests submitted from a client to **20 requests per 10 seconds**. If this submission rate is exceeded, the SDK will raise a ``MaxRequestsExceeded`` exception.
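Client code can treat the exception as a signal to back off and retry; a sketch (the exception is matched by class name because its import path is not shown here and may vary by SDK version):

.. code-block:: python

    import time

    def run_with_backoff(gcc, *args, attempts=5, **kwargs):
        # retry gcc.run() with a growing pause when the task-rate limit trips
        for attempt in range(attempts):
            try:
                return gcc.run(*args, **kwargs)
            except Exception as exc:
                if type(exc).__name__ != "MaxRequestsExceeded":
                    raise
                time.sleep(10 * (attempt + 1))  # stay under 20 requests / 10 seconds
        raise RuntimeError("rate-limit backoff exhausted")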
@@ -33,7 +33,7 @@ Data Limits ^^^^^^^^^^^ Data limits are used to ensure that the inputs and results associated with functions can be handled -by funcx both at the user level as well as in the aggregate. Without these limits functions that either +by Globus Compute both at the user level as well as in the aggregate. Without these limits functions that either consume or produce large volumes of data could overload the system. The current data limit is set to **5MB** on task submissions, which applies to both individual functions @@ -57,7 +57,7 @@ Task time to live (TTL) is a mechanism to identify tasks that are possibly aband environment where tasks may take several days to be allocated compute resources (eg, by a cluster) or simply take days to run to completion, distinguishing between tasks that are blocked vs abandoned is difficult. On the other hand, tasks once launched can be lost on the client-side due to a crash or a -programmatic error. To avoid such cases, funcX considers a task to be abandoned when there has been +programmatic error. To avoid such cases, Globus Compute considers a task to be abandoned when there has been no activity on a task for more than **2 weeks**. However, when a result is available but has not been fetched by the client within **30 minutes** of completion, the result is marked as abandoned.
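A rough client-side pre-flight check against the data limit can be made by serializing the arguments locally; a sketch (the service measures the full serialized payload, including envelope overhead, so treat this as an approximation):

.. code-block:: python

    import pickle

    def payload_within_limit(*args, limit_mb=5, **kwargs):
        # approximate the serialized argument size against the 5MB cap
        size_bytes = len(pickle.dumps((args, kwargs)))
        return size_bytes <= limit_mb * 1024 * 1024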
diff --git a/docs/quickstart.rst b/docs/quickstart.rst index 73a3a03a7..99de8541a 100644 --- a/docs/quickstart.rst +++ b/docs/quickstart.rst @@ -1,17 +1,17 @@ Quickstart ========== -**funcX** client and endpoint software releases are available on `PyPI `_. +**Globus Compute** client and endpoint software releases are available on `PyPI `_. -You can try funcX on a hosted Jupyter notebook with `Binder `_ +You can try Globus Compute on a hosted Jupyter notebook with `Binder `_ Installation ------------ -**funcX** comes with two components: the **endpoint**, a user-managed software agent that must be deployed on a compute resource to make it accessible for function execution; and the **funcX client**, which provides a Python API for registration, execution, and management of functions across **endpoints**. +**Globus Compute** comes with two components: the **endpoint**, a user-managed software agent that must be deployed on a compute resource to make it accessible for function execution; and the **Globus Compute client**, which provides a Python API for registration, execution, and management of functions across **endpoints**. -The pre-requisites for the `funcX endpoint` and the `funcX client` are +The prerequisites for the `Globus Compute endpoint` and the `Globus Compute client` are 1. Python3.7+ 2. The machine must have outbound network access To check if you have the right Python version, run the following commands:: This should return the Python version, for example: ``Python 3.8.10``. -To check if your endpoint/client has network access and can connect to the funcX service, run:: +To check if your endpoint/client has network access and can connect to the Globus Compute service, run:: >>> curl https://api2.funcx.org/v2/version This should return a version string, for example: ``"1.0.5"`` -.. note:: The funcx client is supported on MacOS, Linux, and Windows. The funcx-endpoint +.. note:: The Globus Compute client is supported on MacOS, Linux, and Windows. The globus-compute-endpoint is only supported on Linux. -Installing funcX in a Virtual Environment -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. _install_gc_sdk: + +Installing Globus Compute in a Virtual Environment +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -While ``pip`` and ``pip3`` can be used to install funcX we suggest the following approach +While ``pip`` and ``pip3`` can be used to install Globus Compute we suggest the following approach for reliable installation to avoid python package dependency conflicts. -1. Install the funcx client in its own `venv `_ environment:: +1. Install the Globus Compute client in its own `venv `_ environment:: - $ python3 -m venv path/to/funcx_venv - $ source path/to/funcx_venv/bin/activate - (funcx_venv) $ python3 -m pip install funcx + $ python3 -m venv path/to/globus_compute_venv + $ source path/to/globus_compute_venv/bin/activate + (globus_compute_venv) $ python3 -m pip install globus-compute-sdk - To update a previously installed funcX to a newer version in the virtual environment, use:: + To update a previously installed Globus Compute to a newer version in the virtual environment, use:: - (funcx_venv) $ python3 -m pip install -U funcx + (globus_compute_venv) $ python3 -m pip install -U globus-compute-sdk -2. (Optional) The funcX endpoint can be installed using `Pipx `_ or using pip in the venv:: +.. _install_gc_endpoint: + +2. (Optional) The Globus Compute endpoint can be installed using `Pipx `_ or using pip in the venv:: - $ python3 -m pipx install funcx_endpoint + $ python3 -m pipx install globus-compute-endpoint or - (funcx_venv) $ python3 -m pip install funcx_endpoint + (globus_compute_venv) $ python3 -m pip install globus-compute-endpoint 3. (Optional) Install Jupyter for Tutorial notebooks in the venv:: - (funcx_venv) $ python3 -m pip install jupyter + (globus_compute_venv) $ python3 -m pip install jupyter .. 
note:: For more detailed info on setting up Jupyter with Python3.5 go `here `_ @@ -66,17 +68,17 @@ for reliable installation to avoid python package dependency conflicts. First Run --------- -The funcX SDK makes use of the funcX web services, most of which restrict use +The Globus Compute SDK makes use of the Globus Compute web services, most of which restrict use to Globus authenticated users. Consequently, if you have not previously used -funcX from your workstation, or have otherwise not authenticated with Globus, -then the FuncXClient will present a one-time URL. The one-time URL workflow +Globus Compute from your workstation, or have otherwise not authenticated with Globus, +then the Client will present a one-time URL. The one-time URL workflow will culminate in a token code to be pasted back into the terminal. The easiest approach is typically from the command line: .. code-block:: python - >>> from funcx import FuncXClient - >>> FuncXClient() + >>> from globus_compute_sdk import Client + >>> Client() Please authenticate with Globus here: ------------------------------------ https://auth.globus.org/v2/oauth2/authorize?[...very...long...url]&prompt=login @@ -84,19 +86,19 @@ easiest approach is typically from the command line: Enter the resulting Authorization Code here: -funcX will then cache the credentials for future invocations, so this workflow +Globus Compute will then cache the credentials for future invocations, so this workflow will only be initiated once. Running a function ------------------ -After installing the funcX SDK, you can define a function and submit it for +After installing the Globus Compute SDK, you can define a function and submit it for execution to available endpoints. For most use-cases that will use the -``FuncXExecutor``: +``Executor``: .. code-block:: python - from funcx import FuncXExecutor + from globus_compute_sdk import Executor # First, define the function ... def add_func(a, b): @@ -104,9 +106,9 @@ execution to available endpoints. For most use-cases that will use the tutorial_endpoint_id = '4b116d3c-1703-4f8f-9f6f-39921e5864df' # Public tutorial endpoint # ... then create the executor, ... - with FuncXExecutor(endpoint_id=tutorial_endpoint_id) as fxe: + with Executor(endpoint_id=tutorial_endpoint_id) as gce: # ... then submit for execution, ... - future = fxe.submit(add_func, 5, 10) + future = gce.submit(add_func, 5, 10) # ... and finally, wait for the result print(future.result()) @@ -114,7 +116,7 @@ execution to available endpoints. For most use-cases that will use the .. note:: Like most FaaS platforms, the function must be registered with the upstream web services before it can be executed on a remote endopint. While one can - manually register a function (see the FuncXClient or FuncXExecutor API + manually register a function (see the Client or Executor API documentation), the above workflow will automatically handle registration. A word on the above example: while the tutorial endpoint is open for anyone to @@ -126,19 +128,19 @@ Deploying an endpoint ---------------------- You can deploy an endpoint on your laptop, cluster, or cloud -by downloading and installing the funcX endpoint software. -The funcX endpoint software is available on PyPI and a default +by downloading and installing the Globus Compute endpoint software. +The Globus Compute endpoint software is available on PyPI and a default endpoint can be configured and started as follows. 
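
The single ``submit`` call above generalizes to many concurrent tasks. A minimal sketch, assuming only the ``Executor`` interface shown above plus the standard-library ``concurrent.futures`` helpers:

.. code-block:: python

    import concurrent.futures

    from globus_compute_sdk import Executor

    def add_func(a, b):
        return a + b

    tutorial_endpoint_id = '4b116d3c-1703-4f8f-9f6f-39921e5864df'  # Public tutorial endpoint

    with Executor(endpoint_id=tutorial_endpoint_id) as gce:
        # Submit a batch of independent tasks ...
        futures = [gce.submit(add_func, i, i) for i in range(5)]

        # ... and consume results as each task finishes, not in submission order.
        for fut in concurrent.futures.as_completed(futures):
            print(fut.result())
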
@@ -126,19 +130,19 @@
 Deploying an endpoint
 ----------------------

 You can deploy an endpoint on your laptop, cluster, or cloud
-by downloading and installing the funcX endpoint software.
-The funcX endpoint software is available on PyPI and a default
+by downloading and installing the Globus Compute endpoint software.
+The Globus Compute endpoint software is available on PyPI and a default
 endpoint can be configured and started as follows. During the configuration process
 you will be prompted to authenticate following the same process as using the SDK.
 For more advanced deployments (e.g., on clouds and clusters) please refer to the
 `endpoints`_ documentation. ::

-    $ python3 -m pip install funcx_endpoint
+    $ python3 -m pip install globus-compute-endpoint

-    $ funcx-endpoint configure
+    $ globus-compute-endpoint configure

-    $ funcx-endpoint start
+    $ globus-compute-endpoint start

 .. _endpoints: endpoints.html

diff --git a/docs/reference/client.rst b/docs/reference/client.rst
index 5021d8226..3117645f3 100644
--- a/docs/reference/client.rst
+++ b/docs/reference/client.rst
@@ -1,10 +1,10 @@
-The FuncX Client
-================
+The Globus Compute Client
+=========================

-.. autoclass:: funcx.FuncXClient
+.. autoclass:: globus_compute_sdk.Client
    :members:
    :member-order: bysource

-.. autoclass:: funcx.sdk.container_spec.ContainerSpec
+.. autoclass:: globus_compute_sdk.sdk.container_spec.ContainerSpec
    :members:
    :member-order: bysource

diff --git a/docs/reference/executor.rst b/docs/reference/executor.rst
index bf88dcf3e..944fdfa27 100644
--- a/docs/reference/executor.rst
+++ b/docs/reference/executor.rst
@@ -1,10 +1,10 @@
-The FuncX Executor
-==================
+The Globus Compute Executor
+===========================

-.. autoclass:: funcx.FuncXExecutor
+.. autoclass:: globus_compute_sdk.Executor
    :members:
    :member-order: bysource

-.. autoclass:: funcx.sdk.executor.FuncXFuture
+.. autoclass:: globus_compute_sdk.sdk.executor.ComputeFuture
    :members:
    :member-order: bysource

diff --git a/docs/reference/index.rst b/docs/reference/index.rst
index b331ceb3d..94b4a88d3 100644
--- a/docs/reference/index.rst
+++ b/docs/reference/index.rst
@@ -1,6 +1,6 @@
-.. module:: funcx
+.. module:: globus_compute_sdk

-funcX SDK
-=========
+Globus Compute SDK
+==================

 .. toctree::

diff --git a/docs/sdk.rst b/docs/sdk.rst
index 74bb26179..a8209b59d 100644
--- a/docs/sdk.rst
+++ b/docs/sdk.rst
@@ -1,7 +1,7 @@
-funcX SDK User Guide
-====================
+Globus Compute SDK User Guide
+=============================

-The **funcX SDK** provides a programmatic interface to funcX from Python.
+The **Globus Compute SDK** provides a programmatic interface to Globus Compute from Python.
 The SDK provides a convenient Pythonic interface to:

 1. Register functions
@@ -10,35 +10,35 @@ The SDK provides a convenient Pythonic interface to:
 4. Check the status of launched functions
 5. Retrieve outputs from functions

-The SDK provides a client class for interacting with funcX. The client
-abstracts authentication and provides an interface to make funcX
-API calls without needing to know the funcX REST endpoints for those operations.
-You can instantiate a funcX client as follows:
+The SDK provides a client class for interacting with Globus Compute. The client
+abstracts authentication and provides an interface to make Globus Compute
+API calls without needing to know the Globus Compute REST endpoints for those operations.
+You can instantiate a Globus Compute client as follows:

 .. code-block:: python

-    from funcx import FuncXClient
-    fxc = FuncXClient()
+    from globus_compute_sdk import Client
+    gcc = Client()

 Instantiating a client will start an authentication process where you will be asked to authenticate via Globus Auth.
-We require every interaction with funcX to be authenticated, as this enables enforced
+We require every interaction with Globus Compute to be authenticated, as this enables enforced
 access control on both functions and endpoints.
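
Because credentials are cached after the first login, constructing a ``Client`` is normally non-interactive on later runs. A minimal sketch, assuming only the constructor shown above and the ``force_login=True`` re-authentication described in the caching note below:

.. code-block:: python

    from globus_compute_sdk import Client

    # First construction triggers the one-time Globus Auth flow; afterwards
    # cached tokens are reused and this call is non-interactive.
    gcc = Client()

    # To discard cached lookups or switch identities, re-authenticate explicitly.
    gcc = Client(force_login=True)
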
Globus Auth is an identity and access management platform that provides authentication brokering -capablities enabling users to login using one of several hundred supported identities. +capabilities enabling users to login using one of several hundred supported identities. It also provides group and profile management for user accounts. -As part of the authentication process, funcX will request access -to your identity (to retrieve your email address) and Globus Groups. funcX uses +As part of the authentication process, Globus Compute will request access +to your identity (to retrieve your email address) and Globus Groups. Globus Compute uses Groups to facilitate sharing and to make authorization decisions. -funcX allows endpoints and functions to be shared by associating a Globus Group. +Globus Compute allows endpoints and functions to be shared by associating a Globus Group. -.. note:: funcX internally caches function, endpoint, and authorization lookups. Caches are based on user authentication tokens. To force refresh cached +.. note:: Globus Compute internally caches function, endpoint, and authorization lookups. Caches are based on user authentication tokens. To force refresh cached entries, you can re-authenticate your client with ``force_login=True``. Registering Functions --------------------- -You can register a Python function with funcX via ``register_function()``. Function registration serializes the -function body and transmits it to funcX. Once the function is registered with funcX, it is assigned a +You can register a Python function with Globus Compute via ``register_function()``. Function registration serializes the +function body and transmits it to Globus Compute. Once the function is registered with Globus Compute, it is assigned a UUID that can be used to manage and invoke the function. .. note:: You must import any dependencies required by the function inside the function body. @@ -46,7 +46,7 @@ UUID that can be used to manage and invoke the function. The following example shows how to register a function. In this case, the function simply returns the platform information of the system on which it is executed. The function -is defined in the same way as any Python function before being registered with funcX. +is defined in the same way as any Python function before being registered with Globus Compute. .. code-block:: python @@ -54,7 +54,7 @@ is defined in the same way as any Python function before being registered with f import platform return platform.platform() - func_uuid = fxc.register_function(platform_func) + func_uuid = gcc.register_function(platform_func) Running Functions @@ -62,16 +62,16 @@ Running Functions You can invoke a function using the UUID returned when registering the function. The ``run()`` function requires that you specify the function (``function_id``) and endpoint (``endpoint_id``) on which to execute -the function. funcX will return a UUID for the executing function (called a task) via which you can +the function. Globus Compute will return a UUID for the executing function (called a task) via which you can monitor status and retrieve results. .. code-block:: python tutorial_endpoint = '4b116d3c-1703-4f8f-9f6f-39921e5864df' - task_id = fxc.run(endpoint_id=tutorial_endpoint, function_id=func_uuid) + task_id = gcc.run(endpoint_id=tutorial_endpoint, function_id=func_uuid) .. note:: - funcX places limits on the size of the functions and the rate at which functions can be submitted. 
+   Globus Compute places limits on the size of the functions and the rate at which functions can be submitted.
    Please refer to the limits section for TODO:YADU

@@ -86,34 +86,34 @@ task is still pending.

 .. code-block:: python

     try:
-        print(fxc.get_result(task_id))
+        print(gcc.get_result(task_id))
     except Exception as e:
         print("Exception: {}".format(e))

-.. note:: funcX caches results in the cloud until they have been retrieved. The SDK also caches results
+.. note:: Globus Compute caches results in the cloud until they have been retrieved. The SDK also caches results
    during a session. However, calling ``get_result()`` from a new session will not be able to access the results.

 Arguments and data
 ------------------

-funcX functions operate the same as any other Python function. You can pass arguments \*args and \**kwargs
-and return values from functions. The only constraint is that data passed to/from a funcX function must be
+Globus Compute functions operate the same as any other Python function. You can pass arguments \*args and \**kwargs
+and return values from functions. The only constraint is that data passed to/from a Globus Compute function must be
 serializable (e.g., via Pickle) and fall within service limits.

 Input arguments can be passed to the function using the ``run()`` function. The following example shows how
 strings can be passed to and from a function.

 .. code-block:: python

-    def funcx_hello(firstname, lastname):
+    def hello(firstname, lastname):
         return 'Hello {} {}'.format(firstname, lastname)

-    func_id = fxc.register_function(funcx_hello)
+    func_id = gcc.register_function(hello)

-    task_id = fxc.run("Bob", "Smith", endpoint_id=tutorial_endpoint, function_id=func_id)
+    task_id = gcc.run("Bob", "Smith", endpoint_id=tutorial_endpoint, function_id=func_id)

     try:
-        print(fxc.get_result(task_id))
+        print(gcc.get_result(task_id))
     except Exception as e:
         print("Exception: {}".format(e))

@@ -127,16 +127,16 @@ To share with a group, set ``group=<group_id>`` when registering a function.

 .. code-block:: python

-    fxc.register_function(funcx, description="My function", group=<group_id>)
+    gcc.register_function(func, description="My function", group=<group_id>)

-Upon execution, funcX will check group membership to ensure that the user is authorized to execute the function.
+Upon execution, Globus Compute will check group membership to ensure that the user is authorized to execute the function.

 You can also set a function to be publicly accessible by setting ``public=True`` when registering the function.

 .. code-block:: python

-    fxc.register_function(funcx, description="My function", public=True)
+    gcc.register_function(func, description="My function", public=True)

 .. _batching:

@@ -151,13 +151,13 @@ corresponding to the functions in the batch with the ordering preserved.

 .. code-block:: python

-    batch = fxc.create_batch()
+    batch = gcc.create_batch()

     for x in range(0,5):
         batch.add(x, endpoint_id=tutorial_endpoint, function_id=func_id)

-    # batch_run returns a list task ids
-    batch_res = fxc.batch_run(batch)
+    # batch_run returns a list of task ids
+    batch_res = gcc.batch_run(batch)

-The batch result interface is useful to to fetch the results of a collection of task_ids.
+The batch result interface is useful to fetch the results of a collection of task_ids.

@@ -167,7 +167,7 @@ and a result if it is available.

 .. code-block:: python

-    >>> results = fxc.get_batch_result(batch_res)
+    >>> results = gcc.get_batch_result(batch_res)
    >>> print(results)

    {'10c9678c-b404-4e40-bfd4-81581f52f9db': {'pending': False,
    }
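
Because each entry returned by ``get_batch_result()`` carries a ``pending`` flag, callers typically poll until every task in the batch has resolved. A minimal, self-contained sketch, assuming the ``create_batch``/``batch_run``/``get_batch_result`` interface shown above (note that the smoke tests in this changeset exercise a newer ``batch.add`` signature):

.. code-block:: python

    import time

    from globus_compute_sdk import Client

    gcc = Client()

    def double(x):
        return 2 * x

    func_id = gcc.register_function(double)
    tutorial_endpoint = "4b116d3c-1703-4f8f-9f6f-39921e5864df"  # public tutorial endpoint

    batch = gcc.create_batch()
    for x in range(5):
        batch.add(x, endpoint_id=tutorial_endpoint, function_id=func_id)

    task_ids = gcc.batch_run(batch)

    # Poll until no task in the batch is still pending.
    results = {}
    while len(results) < len(task_ids):
        time.sleep(2)
        for tid, status in gcc.get_batch_result(task_ids).items():
            if status["pending"] is False and tid not in results:
                results[tid] = status.get("result")

    print(results)
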
-.. _client credentials with funcxclients:
+.. _client credentials with globus compute clients:

-Client Credentials with FuncXClients
-------------------------------------
+Client Credentials with Clients
+-------------------------------

 Client credentials can be useful if you need an endpoint to run in a service account or
 to be started automatically with a process manager.

-The funcX SDK supports use of Globus Auth client credentials for login, if you have `registered a client. `_
-To use client credentials, you must set the envrionment variables **FUNCX_SDK_CLIENT_ID** to your client ID,
-and **FUNCX_SDK_CLIENT_SECRET** to your client secret.
+The Globus Compute SDK supports use of Globus Auth client credentials for login, if you have `registered a client `_.
+To use client credentials, you must set the environment variables **FUNCX_SDK_CLIENT_ID** to your client ID,
+and **FUNCX_SDK_CLIENT_SECRET** to your client secret.

-When these envrionment variables are set they will take priority over any other credentials on the system and the FuncXClient will assume the identity of the client app.
-This also applies when starting a funcX endpoint.
+When these environment variables are set, they will take priority over any other credentials on the system, and the Client will assume the identity of the client app.
+This also applies when starting a Globus Compute endpoint.

 .. code:: bash

     $ export FUNCX_SDK_CLIENT_ID="b0500dab-ebd4-430f-b962-0c85bd43bdbb"
     $ export FUNCX_SDK_CLIENT_SECRET="ABCDEFGHIJKLMNOP0123456789="

-.. note:: funcX clients and endpoints will use the client credentials if they are set, so it is important to ensure the client submitting requests has access to an endpoint.
+.. note:: Globus Compute clients and endpoints will use the client credentials if they are set, so it is important to ensure the client submitting requests has access to an endpoint.
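
The same client-credential setup can be applied from Python before the first ``Client`` construction. A minimal sketch, assuming only the two environment variables documented above (the secret value is a hypothetical placeholder):

.. code-block:: python

    import os

    # The two variables documented above; the secret shown is a placeholder.
    os.environ["FUNCX_SDK_CLIENT_ID"] = "b0500dab-ebd4-430f-b962-0c85bd43bdbb"
    os.environ["FUNCX_SDK_CLIENT_SECRET"] = "<your-client-secret>"  # placeholder

    from globus_compute_sdk import Client

    # With the variables set, the Client assumes the registered app's identity
    # instead of prompting for an interactive Globus Auth login.
    gcc = Client()
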
 .. _login manager:

@@ -214,10 +214,10 @@
 Using a Custom LoginManager
 ---------------------------

-To programmatically create a FuncXClient from tokens and remove the need to perform a Native App login flow you can use a custom *LoginManager*.
-The LoginManager is responsible for serving tokens to the FuncXClient as needed. Typically, this would perform a Native App login flow, store tokens, and return them as needed.
+To programmatically create a Client from tokens and remove the need to perform a Native App login flow, you can use a custom *LoginManager*.
+The LoginManager is responsible for serving tokens to the Client as needed. Typically, this would perform a Native App login flow, store tokens, and return them as needed.

-A custom LoginManager can be used to simply return static tokens and enable programmatic use of the FuncXClient.
+A custom LoginManager can be used to simply return static tokens and enable programmatic use of the Client.

 .. note::
    To access the funcX API the scope that needs to be requested from

@@ -228,20 +228,20 @@ A custom LoginManager can be used to simply return static tokens and enable prog
    https://auth.globus.org/scopes/facd7ccc-c5f4-42aa-916b-a0e270e2c2a9/all

-More details on the funcX login manager prototcol are available `here. `_
+More details on the Globus Compute login manager protocol are available `here `_.

 .. code:: python

     import globus_sdk
     from globus_sdk.scopes import AuthScopes

-    from funcx.sdk.login_manager import LoginManager
-    from funcx.sdk.web_client import FuncxWebClient
-    from funcx import FuncXClient
+    from globus_compute_sdk.sdk.login_manager import LoginManager
+    from globus_compute_sdk.sdk.web_client import WebClient
+    from globus_compute_sdk import Client

-    class FuncXLoginManager:
+    class LoginManager:
         """
-        Implements the funcx.sdk.login_manager.protocol.LoginManagerProtocol class.
+        Implements the globus_compute_sdk.sdk.login_manager.protocol.LoginManagerProtocol class.
         """

         def __init__(self, authorizers: dict[str, globus_sdk.RefreshTokenAuthorizer]):
@@ -252,10 +252,10 @@ More details on the funcX login manager prototcol are available `here. `_
-        def get_funcx_web_client(self, *, base_url: str) -> FuncxWebClient:
-            return FuncxWebClient(
+        def get_web_client(self, *, base_url: str) -> WebClient:
+            return WebClient(
                 base_url=base_url,
-                authorizer=self.authorizers[FuncXClient.FUNCX_SCOPE],
+                authorizer=self.authorizers[Client.FUNCX_SCOPE],
             )

         def ensure_logged_in(self):
@@ -265,13 +265,13 @@ More details on the funcX login manager prototcol are available `here. `_

diff --git a/helm/funcx_endpoint/README.md b/helm/funcx_endpoint/README.md
-| workerInit | Command to execute on worker before strating uip | pip install parsl==0.9.0;pip install --force-reinstall funcx>=0.0.2a0 |
+| workerInit | Command to execute on worker before starting up | pip install parsl==0.9.0;pip install --force-reinstall globus-compute-sdk>=2.0.0 |
 | workerNamespace | Kubernetes namespace to launch worker pods into | default |
 | workingDir | Directory inside the container where log files are to be stored | /tmp/worker_logs |
 | rbacEnabled | Create service account and roles? | true |
@@ -165,8 +165,8 @@ The deployment is configured via values.yaml file.
 | maxBlocks | Maximum number of worker pods to spawn | 100 |
 | maxWorkersPerPod | How many workers will be scheduled in each pod | 1 |
 | endpointName | (Optional) Specify a name for registration with the funcX web services | The release name (Release.Name) |
-| endpointUUID | (Required) Specify a UUID for this endpoint | |
-| endpointCLIargs | Any additional command line arguments to give to the `funcx-endpoint` executable | |
+| endpointUUID | (Required) Specify a UUID for this endpoint. | |
+| endpointCLIargs | Any additional command line arguments to give to the `globus-compute-endpoint` executable | |
 | maxIdleTime | The maximum time to maintain an idle worker. After this time the SimpleStrategy will terminate the idle worker. | 3600 |
 | imagePullSecret | The K8s secret to use to deploy worker images. This can refer to an ECR secret.
| | | secrets | Kubernetes secret object in which to find client credential environment variables | | diff --git a/helm/boot.sh b/helm/boot.sh index 8eadd4d13..82e1a66a8 100755 --- a/helm/boot.sh +++ b/helm/boot.sh @@ -6,12 +6,12 @@ EP_UUID="$1"; shift echo -e "\n Preparing to start kubelet Endpoint: $EP_UUID ($EP_NAME)\n" mkdir -p "$HOME/.funcx/$EP_NAME/" -cp /funcx/ep_instance/* "$HOME/.funcx/$EP_NAME/" -cp /funcx/config/config.py "$HOME/.funcx/" +cp /compute/"$EP_NAME"/* "$HOME/.funcx/$EP_NAME/" +cp /compute/config/config.py "$HOME/.funcx/" -if [[ -e "/funcx/credentials/storage.db" ]]; then - cp /funcx/credentials/storage.db "$HOME/.funcx/" +if [[ -e "/compute/credentials/storage.db" ]]; then + cp /compute/credentials/storage.db "$HOME/.funcx/" chmod 600 "$HOME/.funcx/storage.db" fi -exec funcx-endpoint start "$EP_NAME" --endpoint-uuid "$EP_UUID" "$@" +exec globus-compute-endpoint start "$EP_NAME" --endpoint-uuid "$EP_UUID" "$@" diff --git a/helm/funcx_endpoint/Chart.yaml b/helm/funcx_endpoint/Chart.yaml index 002765949..c26b1615f 100644 --- a/helm/funcx_endpoint/Chart.yaml +++ b/helm/funcx_endpoint/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 -name: funcx_endpoint -description: Deploy a funcX endpoint into a cluster +name: globus_compute_endpoint +description: Deploy a Globus Compute endpoint into a cluster type: application diff --git a/helm/funcx_endpoint/templates/NOTES.txt b/helm/funcx_endpoint/templates/NOTES.txt index c9546b830..2d2fc6671 100644 --- a/helm/funcx_endpoint/templates/NOTES.txt +++ b/helm/funcx_endpoint/templates/NOTES.txt @@ -1,5 +1,5 @@ Ready to use your new endpoint To find your Endpoint UID -export EP_POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app=funcx-endpoint" -o jsonpath="{.items[0].metadata.name}") +export EP_POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app=globus-compute-endpoint" -o jsonpath="{.items[0].metadata.name}") kubectl logs $EP_POD_NAME diff --git a/helm/funcx_endpoint/templates/_helpers.tpl b/helm/funcx_endpoint/templates/_helpers.tpl index d06242f28..5428ffe54 100644 --- a/helm/funcx_endpoint/templates/_helpers.tpl +++ b/helm/funcx_endpoint/templates/_helpers.tpl @@ -2,7 +2,7 @@ {{/* Expand the name of the chart. */}} -{{- define "funcx_endpoint.name" -}} +{{- define "globus_compute_endpoint.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} @@ -11,7 +11,7 @@ Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} -{{- define "funcx_endpoint.fullname" -}} +{{- define "globus_compute_endpoint.fullname" -}} {{- if .Values.fullnameOverride -}} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} {{- else -}} @@ -27,16 +27,16 @@ If release name contains chart name it will be used as a full name. {{/* Create chart name and version as used by the chart label. */}} -{{- define "funcx_endpoint.chart" -}} +{{- define "globus_compute_endpoint.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Common labels */}} -{{- define "funcx_endpoint.labels" -}} -helm.sh/chart: {{ include "funcx_endpoint.chart" . }} -{{ include "funcx_endpoint.selectorLabels" . }} +{{- define "globus_compute_endpoint.labels" -}} +helm.sh/chart: {{ include "globus_compute_endpoint.chart" . }} +{{ include "globus_compute_endpoint.selectorLabels" . 
}} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} @@ -46,17 +46,17 @@ app.kubernetes.io/managed-by: {{ .Release.Service }} {{/* Selector labels */}} -{{- define "funcx_endpoint.selectorLabels" -}} -app.kubernetes.io/name: {{ include "funcx_endpoint.name" . }} +{{- define "globus_compute_endpoint.selectorLabels" -}} +app.kubernetes.io/name: {{ include "globus_compute_endpoint.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end -}} {{/* Create the name of the service account to use */}} -{{- define "funcx_endpoint.serviceAccountName" -}} +{{- define "globus_compute_endpoint.serviceAccountName" -}} {{- if .Values.serviceAccount.create -}} - {{ default (include "funcx_endpoint.fullname" .) .Values.serviceAccount.name }} + {{ default (include "globus_compute_endpoint.fullname" .) .Values.serviceAccount.name }} {{- else -}} {{ default "default" .Values.serviceAccount.name }} {{- end -}} diff --git a/helm/funcx_endpoint/templates/endpoint-deployment.yaml b/helm/funcx_endpoint/templates/endpoint-deployment.yaml index 9b8746a40..777c98f6d 100644 --- a/helm/funcx_endpoint/templates/endpoint-deployment.yaml +++ b/helm/funcx_endpoint/templates/endpoint-deployment.yaml @@ -21,14 +21,14 @@ spec: labels: app: {{ .Release.Name }}-endpoint spec: - serviceAccountName: {{ template "funcx_endpoint.fullname" . }} + serviceAccountName: {{ template "globus_compute_endpoint.fullname" . }} securityContext: fsGroup: 1000 containers: - name: {{ .Release.Name }}-endpoint image: {{ .Values.image.repository }}:{{ .Values.image.tag }} command: [ "/bin/bash", "-c", "--" ] - args: [ "/home/funcx/boot.sh {{ coalesce .Values.endpointName .Release.Name }} {{ .Values.endpointUUID }} {{ .Values.endpointCLIargs }}" ] + args: [ "/home/compute/boot.sh {{ coalesce .Values.endpointName .Release.Name }} {{ .Values.endpointUUID }} {{ .Values.endpointCLIargs }}" ] tty: true stdin: true imagePullPolicy: {{ .Values.image.pullPolicy }} @@ -55,27 +55,27 @@ spec: volumeMounts: - mountPath: "/mnt" - name: funcxmnt + name: computemnt {{- if .Values.useUserCredentials }} - - name: funcx-sdk-tokens - mountPath: /funcx/credentials + - name: compute-sdk-tokens + mountPath: /compute/credentials readOnly: true {{- end }} - name: endpoint-config - mountPath: /funcx/config + mountPath: /compute/config - name: endpoint-instance-config - mountPath: /funcx/ep_instance + mountPath: /compute/ep_instance ports: - containerPort: 5000 volumes: - - name: funcxmnt + - name: computemnt emptyDir: {} {{- if .Values.useUserCredentials }} - - name: funcx-sdk-tokens + - name: compute-sdk-tokens secret: - secretName: funcx-sdk-tokens + secretName: compute-sdk-tokens {{- end }} - name: endpoint-config configMap: diff --git a/helm/funcx_endpoint/templates/endpoint-instance-config.yaml b/helm/funcx_endpoint/templates/endpoint-instance-config.yaml index 9fc15618b..66015a81c 100644 --- a/helm/funcx_endpoint/templates/endpoint-instance-config.yaml +++ b/helm/funcx_endpoint/templates/endpoint-instance-config.yaml @@ -9,10 +9,10 @@ metadata: app: {{ .Release.Name }} data: config.py: | - from funcx_endpoint.endpoint.utils.config import Config - from funcx_endpoint.executors import HighThroughputExecutor - from funcx_endpoint.providers.kubernetes.kube import KubernetesProvider - from funcx_endpoint.strategies import KubeSimpleStrategy + from globus_compute_endpoint.endpoint.utils.config import Config + from globus_compute_endpoint.executors import HighThroughputExecutor + from 
globus_compute_endpoint.providers.kubernetes.kube import KubernetesProvider + from globus_compute_endpoint.strategies import KubeSimpleStrategy from parsl.addresses import address_by_route config = Config( diff --git a/helm/funcx_endpoint/templates/role.yaml b/helm/funcx_endpoint/templates/role.yaml index 53765ea3e..6d4828f4f 100644 --- a/helm/funcx_endpoint/templates/role.yaml +++ b/helm/funcx_endpoint/templates/role.yaml @@ -2,10 +2,10 @@ kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: {{ template "funcx_endpoint.fullname" . }}-worker-manager + name: {{ template "globus_compute_endpoint.fullname" . }}-worker-manager labels: - app: {{ template "funcx_endpoint.name" . }} - chart: {{ template "funcx_endpoint.chart" . }} + app: {{ template "globus_compute_endpoint.name" . }} + chart: {{ template "globus_compute_endpoint.chart" . }} release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" rules: diff --git a/helm/funcx_endpoint/templates/rolebinding.yaml b/helm/funcx_endpoint/templates/rolebinding.yaml index eefd82f4a..53055f9b7 100644 --- a/helm/funcx_endpoint/templates/rolebinding.yaml +++ b/helm/funcx_endpoint/templates/rolebinding.yaml @@ -2,18 +2,18 @@ kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: {{ template "funcx_endpoint.fullname" . }}-worker-manager + name: {{ template "globus_compute_endpoint.fullname" . }}-worker-manager labels: - app: {{ template "funcx_endpoint.name" . }} - chart: {{ template "funcx_endpoint.chart" . }} + app: {{ template "globus_compute_endpoint.name" . }} + chart: {{ template "globus_compute_endpoint.chart" . }} release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" subjects: - kind: ServiceAccount - name: {{ template "funcx_endpoint.fullname" . }} + name: {{ template "globus_compute_endpoint.fullname" . }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: {{ template "funcx_endpoint.fullname" . }}-worker-manager + name: {{ template "globus_compute_endpoint.fullname" . }}-worker-manager {{- end }} \ No newline at end of file diff --git a/helm/funcx_endpoint/templates/service_account.yaml b/helm/funcx_endpoint/templates/service_account.yaml index f1c74de2d..efddeec59 100644 --- a/helm/funcx_endpoint/templates/service_account.yaml +++ b/helm/funcx_endpoint/templates/service_account.yaml @@ -2,10 +2,10 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: {{ template "funcx_endpoint.fullname" . }} + name: {{ template "globus_compute_endpoint.fullname" . }} labels: - app: {{ template "funcx_endpoint.name" . }} - chart: {{ template "funcx_endpoint.chart" . }} + app: {{ template "globus_compute_endpoint.name" . }} + chart: {{ template "globus_compute_endpoint.chart" . }} release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" {{- end }} diff --git a/helm/funcx_endpoint/values.yaml b/helm/funcx_endpoint/values.yaml index f519449da..26623c72f 100644 --- a/helm/funcx_endpoint/values.yaml +++ b/helm/funcx_endpoint/values.yaml @@ -1,22 +1,22 @@ -# Default values for funcx_endpoint. +# Default values for globus_compute_endpoint. # This is a YAML-formatted file. # Declare variables to be passed into your templates. 
replicaCount: 1 funcXServiceAddress: https://api2.funcx.org image: - repository: funcx/kube-endpoint + repository: compute/kube-endpoint tag: main-3.10 pullPolicy: Always workerDebug: false workerImage: python:3.10 -workerInit: pip install funcx-endpoint>=1.0.10 +workerInit: pip install globus-compute-endpoint>=2.0.0 workerNamespace: default logDir: /tmp/worker_logs rbacEnabled: true -nameOverride: funcx-endpoint +nameOverride: globus-compute-endpoint initMem: 500Mi maxMem: 1600Mi diff --git a/reinstall.sh b/reinstall.sh index 9464b409f..f923a0a08 100755 --- a/reinstall.sh +++ b/reinstall.sh @@ -1,5 +1,5 @@ -pip uninstall -y funcx-sdk funcx-endpoint -cd funcx_sdk +pip uninstall -y globus-compute-sdk globus-compute-endpoint +cd compute_sdk pip install . -cd ../funcx_endpoint +cd ../compute_endpoint pip install . diff --git a/release.sh b/release.sh index 3aeec9741..dd464938f 100755 --- a/release.sh +++ b/release.sh @@ -6,23 +6,23 @@ # tox release command for each package # # Requirements: -# the version is set in funcx_sdk -# the version is set in funcx_endpoint and matches funcx_sdk +# the version is set in globus_compute_sdk +# the version is set in globus_compute_endpoint and matches globus_compute_sdk # the version number must appear to be in use in the changelog # you must have valid git config to create a signed tag (GPG key) # you must have pypi credentials available to twine (e.g. ~/.pypirc) set -euo pipefail -VERSION="$(grep '^__version__' funcx_sdk/funcx/version.py | cut -d '"' -f 2)" -ENDPOINT_VERSION="$(grep '^__version__' funcx_endpoint/funcx_endpoint/version.py | cut -d '"' -f 2)" +VERSION="$(grep '^__version__' compute_sdk/globus_compute_sdk/version.py | cut -d '"' -f 2)" +ENDPOINT_VERSION="$(grep '^__version__' compute_endpoint/globus_compute_endpoint/version.py | cut -d '"' -f 2)" if [[ "$VERSION" != "$ENDPOINT_VERSION" ]]; then echo "package versions mismatched: sdk=$VERSION endpoint=$ENDPOINT_VERSION" exit 1 fi -if ! grep '^funcx \& funcx\-endpoint v'"$VERSION"'$' docs/changelog.rst; then +if ! 
grep '^compute\-sdk \& compute\-endpoint v'"$VERSION"'$' docs/changelog.rst; then
     echo "package version v$VERSION not noted in docs/changelog.rst"
     exit 1
 fi
@@ -30,9 +30,9 @@ fi
 echo "releasing v$VERSION"
 git tag -s "$VERSION" -m "v$VERSION"

-pushd funcx_sdk
+pushd compute_sdk
 tox -e publish-release
 popd

-cd funcx_endpoint
+cd compute_endpoint
 tox -e publish-release

diff --git a/smoke_tests/Makefile b/smoke_tests/Makefile
index 0e0a58569..27813d456 100644
--- a/smoke_tests/Makefile
+++ b/smoke_tests/Makefile
@@ -1,17 +1,17 @@
 # Friendly reminder before running 'local'* smoke tests:
-# $ export FUNCX_LOCAL_ENDPOINT_ID=
 # and optionally:
-# $ export FUNCX_LOCAL_KNOWN_FUNCTION_ID=
+# $ export COMPUTE_LOCAL_ENDPOINT_ID=
+# and optionally:
+# $ export COMPUTE_LOCAL_KNOWN_FUNCTION_ID=

 .PHONY: prod dev local

 prod:
	tox

 dev:
-	tox -e localdeps -- --funcx-config dev
+	tox -e localdeps -- --compute-config dev

 local_with_dev_sdk:
-	@if [ -z "${FUNCX_LOCAL_ENDPOINT_ID}" ]; then echo "Missing exported FUNCX_LOCAL_ENDPOINT_ID"; exit 1; fi
-	tox -e localdeps -- --funcx-config local
+	@if [ -z "${COMPUTE_LOCAL_ENDPOINT_ID}" ]; then echo "Missing exported COMPUTE_LOCAL_ENDPOINT_ID"; exit 1; fi
+	tox -e localdeps -- --compute-config local

 local_with_published_sdk:
-	@if [ -z "${FUNCX_LOCAL_ENDPOINT_ID}" ]; then echo "Missing exported FUNCX_LOCAL_ENDPOINT_ID"; exit 1; fi
-	tox -- --funcx-config local
+	@if [ -z "${COMPUTE_LOCAL_ENDPOINT_ID}" ]; then echo "Missing exported COMPUTE_LOCAL_ENDPOINT_ID"; exit 1; fi
+	tox -- --compute-config local

diff --git a/smoke_tests/README.md b/smoke_tests/README.md
index 9ab5bfa80..e2466bee5 100644
--- a/smoke_tests/README.md
+++ b/smoke_tests/README.md
@@ -25,7 +25,7 @@ repo):

 You can also run `localdeps` against dev with

-    tox -e localdeps -- --funcx-config dev
+    tox -e localdeps -- --compute-config dev

 One can also run tests against a local webservice setup. Use the make targets
 `local_with_published_sdk` and `local_with_dev_sdk` to run tests with published
@@ -37,8 +37,8 @@ One can also run tests against a local webservice setup. Use the make targets

-As with the above make targets, these are just wrappers around tox; can do the
-above by invoking tox direcly:
+As with the above make targets, these are just wrappers around tox; one can do
+the above by invoking tox directly:

-    tox -- --funcx-config local
-    tox -e localdeps -- --funcx-config local
+    tox -- --compute-config local
+    tox -e localdeps -- --compute-config local

 ## Non-Pytest tests

diff --git a/smoke_tests/tests/conftest.py b/smoke_tests/tests/conftest.py
index e96808d70..4e2c87c0d 100644
--- a/smoke_tests/tests/conftest.py
+++ b/smoke_tests/tests/conftest.py
@@ -7,22 +7,21 @@
 import time

 import pytest
+from globus_compute_sdk import Client
+from globus_compute_sdk.sdk.web_client import WebClient
 from globus_sdk import AccessTokenAuthorizer, AuthClient, ConfidentialAppAuthClient

-from funcx import FuncXClient
-from funcx.sdk.web_client import FuncxWebClient
-
 # the non-tutorial endpoint will be required, with the following priority order for
 # finding the ID:
 #
 # 1. `--endpoint` opt
-# 2. FUNX_LOCAL_ENDPOINT_ID (seen here)
-# 3. FUNX_LOCAL_ENDPOINT_NAME (the name of a dir in `~/.funcx/`)
+# 2. COMPUTE_LOCAL_ENDPOINT_ID (seen here)
+# 3. COMPUTE_LOCAL_ENDPOINT_NAME (the name of a dir in `~/.funcx/`)
# 4. 
An endpoint ID found in ~/.funcx/default/endpoint.json # # this var starts with the ID env var load -_LOCAL_ENDPOINT_ID = os.getenv("FUNCX_LOCAL_ENDPOINT_ID") -_LOCAL_FUNCTION_ID = os.getenv("FUNCX_LOCAL_KNOWN_FUNCTION_ID") +_LOCAL_ENDPOINT_ID = os.getenv("COMPUTE_LOCAL_ENDPOINT_ID") +_LOCAL_FUNCTION_ID = os.getenv("COMPUTE_LOCAL_KNOWN_FUNCTION_ID") _CONFIGS = { "dev": { @@ -60,7 +59,7 @@ def _get_local_endpoint_id(): # this is only called if # - there is no endpoint in the config (e.g. config via env var) # - `--endpoint` is not passed - local_endpoint_name = os.getenv("FUNCX_LOCAL_ENDPOINT_NAME", "default") + local_endpoint_name = os.getenv("COMPUTE_LOCAL_ENDPOINT_NAME", "default") data_path = os.path.join( os.path.expanduser("~"), ".funcx", local_endpoint_name, "endpoint.json" ) @@ -75,9 +74,9 @@ def _get_local_endpoint_id(): def pytest_addoption(parser): - """Add funcx-specific command-line options to pytest.""" + """Add command-line options to pytest.""" parser.addoption( - "--funcx-config", default="prod", help="Name of testing config to use" + "--compute-config", default="prod", help="Name of testing config to use" ) parser.addoption( "--endpoint", metavar="endpoint", help="Specify an active endpoint UUID" @@ -85,7 +84,7 @@ def pytest_addoption(parser): parser.addoption( "--service-address", metavar="service-address", - help="Specify a funcX service address", + help="Specify a Globus Compute service address", ) parser.addoption( "--ws-uri", metavar="ws-uri", help="WebSocket URI to get task results" @@ -93,8 +92,8 @@ def pytest_addoption(parser): @pytest.fixture(scope="session") -def funcx_test_config_name(pytestconfig): - return pytestconfig.getoption("--funcx-config") +def compute_test_config_name(pytestconfig): + return pytestconfig.getoption("--compute-config") def _add_args_for_client_creds_login(api_client_id, api_client_secret, client_args): @@ -114,7 +113,7 @@ def _add_args_for_client_creds_login(api_client_id, api_client_secret, client_ar auth_authorizer = AccessTokenAuthorizer(auth_token) try: - from funcx.sdk.login_manager import LoginManagerProtocol + from globus_compute_sdk.sdk.login_manager import LoginManagerProtocol except ImportError: client_args["fx_authorizer"] = funcx_authorizer client_args["openid_authorizer"] = auth_authorizer @@ -130,10 +129,8 @@ def logout(self) -> None: def get_auth_client(self) -> AuthClient: return AuthClient(authorizer=auth_authorizer) - def get_funcx_web_client( - self, *, base_url: str | None = None - ) -> FuncxWebClient: - return FuncxWebClient(base_url=base_url, authorizer=funcx_authorizer) + def get_web_client(self, *, base_url: str | None = None) -> WebClient: + return WebClient(base_url=base_url, authorizer=funcx_authorizer) login_manager = TestsuiteLoginManager() @@ -145,9 +142,9 @@ def get_funcx_web_client( @pytest.fixture(scope="session") -def funcx_test_config(pytestconfig, funcx_test_config_name): +def compute_test_config(pytestconfig, compute_test_config_name): # start with basic config load - config = _CONFIGS[funcx_test_config_name] + config = _CONFIGS[compute_test_config_name] # if `--endpoint` was passed or `endpoint_uuid` is present in config, # handle those cases @@ -180,20 +177,20 @@ def funcx_test_config(pytestconfig, funcx_test_config_name): @pytest.fixture(scope="session") -def fxc(funcx_test_config): - client_args = funcx_test_config["client_args"] - fxc = FuncXClient(**client_args) - return fxc +def compute_client(compute_test_config): + client_args = compute_test_config["client_args"] + gcc = Client(**client_args) 
+ return gcc @pytest.fixture -def endpoint(funcx_test_config): - return funcx_test_config["endpoint_uuid"] +def endpoint(compute_test_config): + return compute_test_config["endpoint_uuid"] @pytest.fixture -def tutorial_function_id(funcx_test_config): - funcid = funcx_test_config.get("public_hello_fn_uuid") +def tutorial_function_id(compute_test_config): + funcid = compute_test_config.get("public_hello_fn_uuid") if not funcid: pytest.skip("test requires a pre-defined public hello function") return funcid @@ -205,12 +202,12 @@ def tutorial_function_id(funcx_test_config): @pytest.fixture -def submit_function_and_get_result(fxc): +def submit_function_and_get_result(compute_client): def submit_fn( endpoint_id, func=None, func_args=None, func_kwargs=None, initial_sleep=0 ): if callable(func): - func_id = fxc.register_function(func) + func_id = compute_client.register_function(func) else: func_id = func @@ -219,7 +216,7 @@ def submit_fn( if func_kwargs is None: func_kwargs = {} - task_id = fxc.run( + task_id = compute_client.run( *func_args, endpoint_id=endpoint_id, function_id=func_id, **func_kwargs ) @@ -229,7 +226,7 @@ def submit_fn( result = None response = None for attempt in range(10): - response = fxc.get_task(task_id) + response = compute_client.get_task(task_id) if response.get("pending") is False: result = response.get("result") else: diff --git a/smoke_tests/tests/test_running_functions.py b/smoke_tests/tests/test_running_functions.py index 0ebd525f1..a258b82b6 100644 --- a/smoke_tests/tests/test_running_functions.py +++ b/smoke_tests/tests/test_running_functions.py @@ -1,20 +1,19 @@ import concurrent.futures import time +import globus_compute_sdk as gc import pytest import requests +from globus_compute_sdk import Executor from packaging.version import Version -import funcx -from funcx import FuncXExecutor - try: - from funcx.errors import TaskPending + from globus_compute_sdk.errors import TaskPending except ImportError: - from funcx.utils.errors import TaskPending + from globus_compute_sdk.utils.errors import TaskPending -sdk_version = Version(funcx.version.__version__) +sdk_version = Version(gc.version.__version__) def test_run_pre_registered_function( @@ -37,23 +36,23 @@ def ohai(): @pytest.mark.skipif(sdk_version.release < (1, 0, 5), reason="batch.add iface updated") -def test_batch(fxc, endpoint): +def test_batch(compute_client, endpoint): """Test batch submission and get_batch_result""" - double_fn_id = fxc.register_function(double) + double_fn_id = compute_client.register_function(double) inputs = list(range(10)) - batch = fxc.create_batch() + batch = compute_client.create_batch() for x in inputs: batch.add(double_fn_id, endpoint, args=(x,)) - batch_res = fxc.batch_run(batch) + batch_res = compute_client.batch_run(batch) total = 0 for _i in range(12): time.sleep(5) - results = fxc.get_batch_result(batch_res) + results = compute_client.get_batch_result(batch_res) try: total = sum(results[tid]["result"] for tid in results) break @@ -63,14 +62,14 @@ def test_batch(fxc, endpoint): assert total == 2 * (sum(inputs)), "Batch run results do not add up" -def test_wait_on_new_hello_world_func(fxc, endpoint): - func_id = fxc.register_function(ohai) - task_id = fxc.run(endpoint_id=endpoint, function_id=func_id) +def test_wait_on_new_hello_world_func(compute_client, endpoint): + func_id = compute_client.register_function(ohai) + task_id = compute_client.run(endpoint_id=endpoint, function_id=func_id) got_result = False for _ in range(30): try: - result = fxc.get_result(task_id) + result = 
compute_client.get_result(task_id)
             got_result = True
         except TaskPending:
             time.sleep(1)
@@ -79,10 +78,10 @@
     assert result == "ohai"

-def test_executor(fxc, endpoint, tutorial_function_id):
-    """Test using FuncXExecutor to retrieve results."""
+def test_executor(compute_client, endpoint, tutorial_function_id):
+    """Test using Executor to retrieve results."""

-    url = f"{fxc.funcx_service_address}/version"
+    url = f"{compute_client.funcx_service_address}/version"
     res = requests.get(url)
     assert res.status_code == 200, f"Received {res.status_code} instead!"
@@ -98,10 +97,10 @@
     num_tasks = 10
     submit_count = 2  # we've had at least one bug that prevented executor re-use
-    with FuncXExecutor(endpoint_id=endpoint, funcx_client=fxc) as fxe:
+    with Executor(endpoint_id=endpoint, funcx_client=compute_client) as gce:
         for _ in range(submit_count):
             futures = [
-                fxe.submit_to_registered_function(tutorial_function_id)
+                gce.submit_to_registered_function(tutorial_function_id)
                 for _ in range(num_tasks)
             ]
@@ -116,7 +115,7 @@
                 "Hello World!" == item for item in results
             ), f"Invalid result: {results}"

-        futures = list(fxe.reload_tasks())
+        futures = list(gce.reload_tasks())
         assert len(futures) == submit_count * num_tasks

         results = []

diff --git a/smoke_tests/tests/test_s3_indirect.py b/smoke_tests/tests/test_s3_indirect.py
index 69598e2a1..548dcb063 100644
--- a/smoke_tests/tests/test_s3_indirect.py
+++ b/smoke_tests/tests/test_s3_indirect.py
@@ -1,7 +1,7 @@
 import pytest

 try:
-    from funcx.errors import FuncxTaskExecutionFailed
+    from globus_compute_sdk.errors import TaskExecutionFailed

     has_task_exec_error_type = True
 except ImportError:
@@ -18,7 +18,7 @@ def large_arg_consumer(data: str) -> int:

 @pytest.mark.parametrize("size", [200, 2000, 20000, 200000])
 def test_allowed_result_sizes(submit_function_and_get_result, endpoint, size):
-    """funcX should allow all listed result sizes which are under 512KB limit"""
+    """Globus Compute should allow all listed result sizes under the 512KB limit"""
     r = submit_function_and_get_result(
         endpoint, func=large_result_producer, func_args=(size,)
     )
@@ -30,11 +30,11 @@
 )
 def test_result_size_too_large(submit_function_and_get_result, endpoint):
     """
-    funcX should raise a MaxResultSizeExceeded exception when results exceeds 10MB
-    limit
+    Globus Compute should raise a MaxResultSizeExceeded exception when results exceed
+    the 10MB limit
     """
-    # SDK wraps remote execution failures in FuncxTaskExecutionFailed exceptions...
+    # SDK wraps remote execution failures in TaskExecutionFailed exceptions...
+    with pytest.raises(TaskExecutionFailed) as excinfo:
         submit_function_and_get_result(
             endpoint, func=large_result_producer, func_args=(11 * 1024 * 1024,)
         )
@@ -44,7 +44,7 @@

 @pytest.mark.parametrize("size", [200, 2000, 20000, 200000])
 def test_allowed_arg_sizes(submit_function_and_get_result, endpoint, size):
-    """funcX should allow all listed result sizes which are under 512KB limit"""
+    """Globus Compute should allow all listed argument sizes under the 512KB limit"""
     r = submit_function_and_get_result(
         endpoint, func=large_arg_consumer, func_args=(bytearray(size),)
     )
@@ -53,7 +53,7 @@

 @pytest.mark.skip(reason="As of 0.3.4, an arg size limit is not being enforced")
 def test_arg_size_too_large(submit_function_and_get_result, endpoint, size=55000000):
-    """funcX should raise an exception for objects larger than some limit,
+    """Globus Compute should raise an exception for objects larger than some limit,
     which we are yet to define.

     This does not work right now.
     """

diff --git a/smoke_tests/tests/test_version.py b/smoke_tests/tests/test_version.py
index 784dd2d4d..46271c54a 100644
--- a/smoke_tests/tests/test_version.py
+++ b/smoke_tests/tests/test_version.py
@@ -2,9 +2,9 @@
 from packaging.version import Version

-def test_web_service(fxc, endpoint, funcx_test_config):
-    """This test checks 1) web-service is online, 2) version of the funcx-web-service"""
-    service_address = fxc.funcx_service_address
+def test_web_service(compute_client, endpoint, compute_test_config):
+    """This test checks 1) the web-service is online, 2) the version of the web-service"""
+    service_address = compute_client.funcx_service_address

     response = requests.get(f"{service_address}/version")

@@ -14,7 +14,7 @@
 )
     service_version = response.json()
-    api_min_version = funcx_test_config.get("api_min_version")
+    api_min_version = compute_test_config.get("api_min_version")
     if api_min_version is not None:
         parsed_min = Version(api_min_version)
         parsed_service = Version(service_version)
@@ -27,15 +27,15 @@
 def say_hello():
     return "Hello World!"
-def test_simple_function(fxc): +def test_simple_function(compute_client): """Test whether we can register a function""" - func_uuid = fxc.register_function(say_hello) + func_uuid = compute_client.register_function(say_hello) assert func_uuid is not None, "Invalid function uuid returned" -def test_ep_status(fxc, endpoint): +def test_ep_status(compute_client, endpoint): """Test whether the tutorial EP is online and reporting status""" - response = fxc.get_endpoint_status(endpoint) + response = compute_client.get_endpoint_status(endpoint) assert ( response["status"] == "online" diff --git a/smoke_tests/tox.ini b/smoke_tests/tox.ini index a10723984..e110ea159 100644 --- a/smoke_tests/tox.ini +++ b/smoke_tests/tox.ini @@ -4,17 +4,17 @@ envlist = py [testenv] passenv = - FUNCX_LOCAL_ENDPOINT_ID - FUNCX_LOCAL_ENDPOINT_NAME - FUNCX_LOCAL_KNOWN_FUNCTION_ID + COMPUTE_LOCAL_ENDPOINT_ID + COMPUTE_LOCAL_ENDPOINT_NAME + COMPUTE_LOCAL_KNOWN_FUNCTION_ID FUNCX_SMOKE_CLIENT_ID FUNCX_SMOKE_CLIENT_SECRET # don't try to install a package skip_install = true deps = - funcx - funcx-endpoint - funcx-common + globus-compute-sdk + globus-compute-endpoint + globus-compute-common pytest allowlist_externals = /bin/bash commands = @@ -23,16 +23,16 @@ commands = [testenv:localdeps] passenv = - FUNCX_LOCAL_ENDPOINT_ID - FUNCX_LOCAL_ENDPOINT_NAME - FUNCX_LOCAL_KNOWN_FUNCTION_ID + COMPUTE_LOCAL_ENDPOINT_ID + COMPUTE_LOCAL_ENDPOINT_NAME + COMPUTE_LOCAL_KNOWN_FUNCTION_ID FUNCX_SMOKE_CLIENT_ID FUNCX_SMOKE_CLIENT_SECRET # don't try to install a package skip_install = true deps = - -e ../funcx_sdk - -e ../funcx_endpoint + -e ../compute_sdk + -e ../compute_endpoint pytest allowlist_externals = /bin/bash commands =
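
Taken together, the smoke-test fixtures above reduce to one reusable pattern: register once, run, then poll ``get_task`` until ``pending`` clears. A minimal sketch of that pattern outside pytest, assuming the same ``Client`` methods exercised by the tests (the endpoint UUID is a placeholder):

.. code-block:: python

    import time

    from globus_compute_sdk import Client

    def run_and_wait(gcc, endpoint_id, func, *args, attempts=10, **kwargs):
        """Register ``func``, run it on ``endpoint_id``, and poll until it resolves."""
        func_id = gcc.register_function(func)
        task_id = gcc.run(*args, endpoint_id=endpoint_id, function_id=func_id, **kwargs)

        for _ in range(attempts):
            response = gcc.get_task(task_id)
            if response.get("pending") is False:
                return response.get("result")
            time.sleep(5)
        raise TimeoutError(f"task {task_id} still pending after {attempts} polls")

    def say_hello():
        return "Hello World!"

    gcc = Client()
    print(run_and_wait(gcc, "<endpoint_uuid>", say_hello))  # placeholder endpoint UUID
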