From e808b5c9546df4c3606cc081ac2036a66b608887 Mon Sep 17 00:00:00 2001 From: Vanessasaurus <814322+vsoch@users.noreply.github.com> Date: Sat, 28 Jan 2023 16:55:04 -0800 Subject: [PATCH] small tweaks to submit (#24) * small tweaks to submit so that variables like requests/limits are carried through * do not check flux-operator.yaml file * adding support for saving additional metadata about cluster nodes * fix data formatting * skip check of metadata files, clusters likely to be different Signed-off-by: vsoch --- CHANGELOG.md | 1 + docs/getting_started/commands.md | 35 +- fluxcloud/client/helpers.py | 5 +- fluxcloud/main/client.py | 11 +- .../main/clouds/aws/scripts/cluster-create | 7 +- .../main/clouds/google/scripts/cluster-create | 4 + .../local/scripts/cluster-create-minikube | 11 + .../main/clouds/shared/scripts/helpers.sh | 22 + fluxcloud/main/experiment.py | 31 +- fluxcloud/version.py | 2 +- .../.scripts/cluster-create-minikube.sh | 39 +- .../.scripts/cluster-destroy-minikube.sh | 24 +- .../.scripts/flux-operator.yaml | 4 + .../.scripts/kubectl-version.yaml | 22 + ...uster-run-lmp-size-2-minicluster-size-2.sh | 26 +- ...uster-run-lmp-size-4-minicluster-size-4.sh | 26 +- .../.scripts/minicluster-size-2.yaml | 0 .../.scripts/minicluster-size-4.yaml | 0 .../.scripts/minikube-version.yaml | 14 + .../.scripts/nodes-size-4.json | 722 ++++++++++++++++++ .../.scripts/nodes-size-4.txt | 336 ++++++++ .../lmp-size-2-minicluster-size-2/log.out | 22 +- .../lmp-size-4-minicluster-size-4/log.out | 18 +- .../{ => minikube}/k8s-size-4-local/meta.json | 8 +- tests/test.sh | 9 + 25 files changed, 1330 insertions(+), 69 deletions(-) rename tests/lammps/data/{ => minikube}/k8s-size-4-local/.scripts/cluster-create-minikube.sh (77%) rename tests/lammps/data/{ => minikube}/k8s-size-4-local/.scripts/cluster-destroy-minikube.sh (83%) rename tests/lammps/data/{ => minikube}/k8s-size-4-local/.scripts/flux-operator.yaml (99%) create mode 100644 
tests/lammps/data/minikube/k8s-size-4-local/.scripts/kubectl-version.yaml rename tests/lammps/data/{ => minikube}/k8s-size-4-local/.scripts/minicluster-run-lmp-size-2-minicluster-size-2.sh (87%) rename tests/lammps/data/{ => minikube}/k8s-size-4-local/.scripts/minicluster-run-lmp-size-4-minicluster-size-4.sh (87%) rename tests/lammps/data/{ => minikube}/k8s-size-4-local/.scripts/minicluster-size-2.yaml (100%) rename tests/lammps/data/{ => minikube}/k8s-size-4-local/.scripts/minicluster-size-4.yaml (100%) create mode 100644 tests/lammps/data/minikube/k8s-size-4-local/.scripts/minikube-version.yaml create mode 100644 tests/lammps/data/minikube/k8s-size-4-local/.scripts/nodes-size-4.json create mode 100644 tests/lammps/data/minikube/k8s-size-4-local/.scripts/nodes-size-4.txt rename tests/lammps/data/{ => minikube}/k8s-size-4-local/lmp-size-2-minicluster-size-2/log.out (81%) rename tests/lammps/data/{ => minikube}/k8s-size-4-local/lmp-size-4-minicluster-size-4/log.out (83%) rename tests/lammps/data/{ => minikube}/k8s-size-4-local/meta.json (75%) diff --git a/CHANGELOG.md b/CHANGELOG.md index b86694a..1c0e86d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ and **Merged pull requests**. Critical items to know are: The versions coincide with releases on pip. Only major versions will be released as tags on Github. 
## [0.0.x](https://github.com/converged-computing/flux-cloud/tree/main) (0.0.x) + - data should be namespaced by cloud type (so multiple experiments can be run alongside) (0.1.17) - add flux-cloud ui to just bring up (and down) a user interface (0.1.16) - support for submit and batch, to run jobs on the same MiniCluster (0.1.15) - minikube docker pull needs message, update tests and typo (0.1.14) diff --git a/docs/getting_started/commands.md b/docs/getting_started/commands.md index 3e86f78..cca4926 100644 --- a/docs/getting_started/commands.md +++ b/docs/getting_started/commands.md @@ -295,21 +295,30 @@ managedNodeGroups: By default, flux cloud keeps all scripts that the job renders in the experiment output directory under `.scripts`. If you want to cleanup instead, you can add the `--cleanup` flag. We do this so you can inspect a script to debug, or if you just want to keep them for reproducibility. As an example, here is outfrom from a run with multiple repeats of the -same command, across two MiniCluster cluster sizes (2 and 4): +same command, across two MiniCluster cluster sizes (2 and 4). As of version `0.1.17` the data is also organized +by the runner (e.g., minikube vs google) so you can run the experiments across multiple clouds without conflict. ```console -$ tree data/k8s-size-4-n1-standard-1/.scripts/ -├── cluster-create.sh -├── cluster-destroy.sh -├── eksctl-config.yaml -├── flux-operator.yaml -├── minicluster-run-lmp-16-10-minicluster-size-16.sh -├── minicluster-run-lmp-16-11-minicluster-size-16.sh -├── minicluster-run-lmp-16-12-minicluster-size-16.sh -... 
-├── minicluster-run-lmp-64-8-minicluster-size-64.sh -├── minicluster-run-lmp-64-9-minicluster-size-64.sh -└── minicluster.yaml +$ tree -a ./data/ +./data/ +└── minikube + └── k8s-size-4-local + ├── lmp-size-2-minicluster-size-2 + │ └── log.out + ├── lmp-size-4-minicluster-size-4 + │ └── log.out + ├── meta.json + └── .scripts + ├── cluster-create-minikube.sh + ├── flux-operator.yaml + ├── kubectl-version.yaml + ├── minicluster-run-lmp-size-2-minicluster-size-2.sh + ├── minicluster-run-lmp-size-4-minicluster-size-4.sh + ├── minicluster-size-2.yaml + ├── minicluster-size-4.yaml + ├── minikube-version.json + ├── nodes-size-4.json + └── nodes-size-4.txt ``` And that's it! I think there might be a more elegant way to determine what cluster is running, diff --git a/fluxcloud/client/helpers.py b/fluxcloud/client/helpers.py index 5cefd9a..d9973d4 100644 --- a/fluxcloud/client/helpers.py +++ b/fluxcloud/client/helpers.py @@ -3,6 +3,8 @@ # # SPDX-License-Identifier: Apache-2.0 +import os + import fluxcloud.utils as utils from fluxcloud.logger import logger from fluxcloud.main import get_experiment_client @@ -21,7 +23,8 @@ def prepare_client(args, extra): force_cluster=args.force_cluster, template=args.template, cleanup=args.cleanup, - outdir=args.output_dir, + # Ensure the output directory is namespaced by the cloud name + outdir=os.path.join(args.output_dir, cli.name), test=args.test, quiet=True, ) diff --git a/fluxcloud/main/client.py b/fluxcloud/main/client.py index 18ea046..869fba7 100644 --- a/fluxcloud/main/client.py +++ b/fluxcloud/main/client.py @@ -108,10 +108,13 @@ def open_ui(self, setup, experiment, size, api=None, persistent=False): logger.info(f"\n🌀 Bringing up MiniCluster of size {size}") - # Get the global "job" for the size (and validate only one image) - # This will raise error if > 1 image, or no image. 
- image = experiment.get_persistent_image(size) - job = {"image": image, "token": api.token, "user": api.user} + # Get persistent variables for this job size, image is required + job = experiment.get_persistent_variables(size, required=["image"]) + job.update({"token": api.token, "user": api.user}) + + # We can't have a command + if "command" in job: + del job["command"] # Pre-pull containers, etc. if hasattr(self, "pre_apply"): diff --git a/fluxcloud/main/clouds/aws/scripts/cluster-create b/fluxcloud/main/clouds/aws/scripts/cluster-create index 25083eb..97cad0c 100755 --- a/fluxcloud/main/clouds/aws/scripts/cluster-create +++ b/fluxcloud/main/clouds/aws/scripts/cluster-create @@ -62,10 +62,11 @@ fi run_echo eksctl create cluster -f ${CONFIG_FILE} -# Show nodes -run_echo kubectl get nodes - # Deploy the operator TODO should be variables here install_operator ${SCRIPT_DIR} ${REPOSITORY} ${BRANCH} run_echo kubectl get namespace run_echo kubectl describe namespace operator-system + +# Save versions of kubectl, eksctl +run_echo_save "${SCRIPT_DIR}/eksctl-version.json" eksctl version --output=json -d --verbose 5 +save_common_metadata ${SCRIPT_DIR} ${SIZE} diff --git a/fluxcloud/main/clouds/google/scripts/cluster-create b/fluxcloud/main/clouds/google/scripts/cluster-create index d543760..0026bcd 100755 --- a/fluxcloud/main/clouds/google/scripts/cluster-create +++ b/fluxcloud/main/clouds/google/scripts/cluster-create @@ -77,3 +77,7 @@ install_operator ${SCRIPT_DIR} ${REPOSITORY} ${BRANCH} run_echo kubectl get namespace run_echo kubectl describe namespace operator-system + +# Save versions of kubectl, gcloud +run_echo_save "${SCRIPT_DIR}/gcloud-version.json" gcloud version --format=json +save_common_metadata ${SCRIPT_DIR} ${SIZE} diff --git a/fluxcloud/main/clouds/local/scripts/cluster-create-minikube b/fluxcloud/main/clouds/local/scripts/cluster-create-minikube index ffd4dc7..f3294e2 100755 --- a/fluxcloud/main/clouds/local/scripts/cluster-create-minikube +++ 
b/fluxcloud/main/clouds/local/scripts/cluster-create-minikube @@ -21,12 +21,22 @@ print_magenta " branch : ${BRANCH}" is_installed minikube is_installed wget +function save_versions () { + + SCRIPT_DIR=${1} + SIZE=${2} + + run_echo_save "${SCRIPT_DIR}/minikube-version.yaml" minikube version --output=yaml --components=true + save_common_metadata ${SCRIPT_DIR} ${SIZE} +} + # Check if it already exists minikube status retval=$? if [[ "${retval}" == "0" ]]; then print_blue "A MiniKube cluster already exists." install_operator ${SCRIPT_DIR} ${REPOSITORY} ${BRANCH} + save_versions ${SCRIPT_DIR} ${SIZE} echo exit 0 fi @@ -44,3 +54,4 @@ run_echo kubectl get nodes run_echo kubectl get namespace run_echo kubectl describe namespace operator-system +save_versions ${SCRIPT_DIR} ${SIZE} diff --git a/fluxcloud/main/clouds/shared/scripts/helpers.sh b/fluxcloud/main/clouds/shared/scripts/helpers.sh index ce257ff..ddcae6b 100644 --- a/fluxcloud/main/clouds/shared/scripts/helpers.sh +++ b/fluxcloud/main/clouds/shared/scripts/helpers.sh @@ -47,6 +47,20 @@ function install_operator() { kubectl apply -f $tmpfile } +function save_common_metadata() { + # Save common versions across clouds for kubectl and the cluster nodes + SCRIPT_DIR="${1}" + SIZE="${2}" + + run_echo_save "${SCRIPT_DIR}/kubectl-version.yaml" kubectl version --output=yaml + + # Show nodes and save metadata to script directory + run_echo kubectl get nodes + run_echo_save "${SCRIPT_DIR}/nodes-size-${SIZE}.json" kubectl get nodes -o json + run_echo_save "${SCRIPT_DIR}/nodes-size-${SIZE}.txt" kubectl describe nodes +} + + function run_echo() { # Show the user the command then run it @@ -55,6 +69,14 @@ function run_echo() { retry $@ } +function run_echo_save() { + echo + save_to="${1}" + shift + print_green "$@ > ${save_to}" + $@ > ${save_to} +} + function run_echo_allow_fail() { echo print_green "$@" diff --git a/fluxcloud/main/experiment.py b/fluxcloud/main/experiment.py index 1326271..e9173ca 100644 --- 
a/fluxcloud/main/experiment.py +++ b/fluxcloud/main/experiment.py @@ -161,7 +161,7 @@ def variables(self): @property def root_dir(self): """ - Consistent means to get experiment. + Consistent means to get experiment, also namespaced to cloud/runner. """ return os.path.join(self.outdir, self.expid) @@ -193,31 +193,32 @@ def iter_jobs(self): yield size, jobname, job - def get_persistent_image(self, size): + def get_persistent_variables(self, size, required=None): """ - A persistent image is a job image used across a size of MiniCluster + Get persistent variables that should be used across the MiniCluster """ - image = None + jobvars = {} for _, job in self.jobs.items(): # Skip jobs targeted for a different size if "size" in job and job["size"] != size: continue - if "image" in job and not image: - image = job["image"] - continue - if "image" in job and image != job["image"]: - raise ValueError( - f"Submit uses a consistent container image, but found two images under size {size}: {image} and {job['image']}" + for key, value in job.items(): + if key not in jobvars or (key in jobvars and jobvars[key] == value): + jobvars[key] = value + continue + logger.warning( + f'Inconsistent job variable between MiniCluster jobs: {value} vs. {jobvars["value"]}' ) # If we get here and we don't have an image - if not image: - raise ValueError( - 'Submit requires a container "image" under at least one job spec to create the MiniCluster.' - ) - return image + for req in required or []: + if req not in jobvars: + raise ValueError( + f'Submit requires a "{req}" field under at least one job spec to create the MiniCluster.' 
+ ) + return jobvars @property def script_dir(self): diff --git a/fluxcloud/version.py b/fluxcloud/version.py index faf0826..cd082ce 100644 --- a/fluxcloud/version.py +++ b/fluxcloud/version.py @@ -1,7 +1,7 @@ # Copyright 2022-2023 Lawrence Livermore National Security, LLC # SPDX-License-Identifier: Apache-2.0 -__version__ = "0.1.16" +__version__ = "0.1.17" AUTHOR = "Vanessa Sochat" EMAIL = "vsoch@users.noreply.github.com" NAME = "flux-cloud" diff --git a/tests/lammps/data/k8s-size-4-local/.scripts/cluster-create-minikube.sh b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/cluster-create-minikube.sh similarity index 77% rename from tests/lammps/data/k8s-size-4-local/.scripts/cluster-create-minikube.sh rename to tests/lammps/data/minikube/k8s-size-4-local/.scripts/cluster-create-minikube.sh index 77fe414..8e9031a 100755 --- a/tests/lammps/data/k8s-size-4-local/.scripts/cluster-create-minikube.sh +++ b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/cluster-create-minikube.sh @@ -50,6 +50,20 @@ function install_operator() { kubectl apply -f $tmpfile } +function save_common_metadata() { + # Save common versions across clouds for kubectl and the cluster nodes + SCRIPT_DIR="${1}" + SIZE="${2}" + + run_echo_save "${SCRIPT_DIR}/kubectl-version.yaml" kubectl version --output=yaml + + # Show nodes and save metadata to script directory + run_echo kubectl get nodes + run_echo_save "${SCRIPT_DIR}/nodes-size-${SIZE}.json" kubectl get nodes -o json + run_echo_save "${SCRIPT_DIR}/nodes-size-${SIZE}.txt" kubectl describe nodes +} + + function run_echo() { # Show the user the command then run it @@ -58,6 +72,14 @@ function run_echo() { retry $@ } +function run_echo_save() { + echo + save_to="${1}" + shift + print_green "$@ > ${save_to}" + $@ > ${save_to} +} + function run_echo_allow_fail() { echo print_green "$@" @@ -131,11 +153,11 @@ function with_exponential_backoff { # Defaults - these are in the config but left here for information CLUSTER_NAME="flux-cluster" 
CLUSTER_VERSION="1.23" -FORCE_CLUSTER="true" +FORCE_CLUSTER="false" SIZE=4 REPOSITORY="flux-framework/flux-operator" BRANCH="main" -SCRIPT_DIR="/tmp/lammps-data-PeHJF2/k8s-size-4-local/.scripts" +SCRIPT_DIR="/home/vanessa/Desktop/Code/flux/flux-cloud/tests/lammps/data/minikube/k8s-size-4-local/.scripts" print_magenta " cluster : ${CLUSTER_NAME}" print_magenta " version : ${CLUSTER_VERSION}" @@ -146,12 +168,22 @@ print_magenta " branch : ${BRANCH}" is_installed minikube is_installed wget +function save_versions () { + + SCRIPT_DIR=${1} + SIZE=${2} + + run_echo_save "${SCRIPT_DIR}/minikube-version.yaml" minikube version --output=yaml --components=true + save_common_metadata ${SCRIPT_DIR} ${SIZE} +} + # Check if it already exists minikube status retval=$? if [[ "${retval}" == "0" ]]; then print_blue "A MiniKube cluster already exists." install_operator ${SCRIPT_DIR} ${REPOSITORY} ${BRANCH} + save_versions ${SCRIPT_DIR} ${SIZE} echo exit 0 fi @@ -168,4 +200,5 @@ install_operator ${SCRIPT_DIR} ${REPOSITORY} ${BRANCH} run_echo kubectl get nodes run_echo kubectl get namespace -run_echo kubectl describe namespace operator-system \ No newline at end of file +run_echo kubectl describe namespace operator-system +save_versions ${SCRIPT_DIR} ${SIZE} \ No newline at end of file diff --git a/tests/lammps/data/k8s-size-4-local/.scripts/cluster-destroy-minikube.sh b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/cluster-destroy-minikube.sh similarity index 83% rename from tests/lammps/data/k8s-size-4-local/.scripts/cluster-destroy-minikube.sh rename to tests/lammps/data/minikube/k8s-size-4-local/.scripts/cluster-destroy-minikube.sh index 389d1bc..b980619 100755 --- a/tests/lammps/data/k8s-size-4-local/.scripts/cluster-destroy-minikube.sh +++ b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/cluster-destroy-minikube.sh @@ -50,6 +50,20 @@ function install_operator() { kubectl apply -f $tmpfile } +function save_common_metadata() { + # Save common versions across clouds for 
kubectl and the cluster nodes + SCRIPT_DIR="${1}" + SIZE="${2}" + + run_echo_save "${SCRIPT_DIR}/kubectl-version.yaml" kubectl version --output=yaml + + # Show nodes and save metadata to script directory + run_echo kubectl get nodes + run_echo_save "${SCRIPT_DIR}/nodes-size-${SIZE}.json" kubectl get nodes -o json + run_echo_save "${SCRIPT_DIR}/nodes-size-${SIZE}.txt" kubectl describe nodes +} + + function run_echo() { # Show the user the command then run it @@ -58,6 +72,14 @@ function run_echo() { retry $@ } +function run_echo_save() { + echo + save_to="${1}" + shift + print_green "$@ > ${save_to}" + $@ > ${save_to} +} + function run_echo_allow_fail() { echo print_green "$@" @@ -129,7 +151,7 @@ function with_exponential_backoff { } # Defaults - these are in the config but left here for information -FORCE_CLUSTER="true" +FORCE_CLUSTER="false" is_installed minikube is_installed yes diff --git a/tests/lammps/data/k8s-size-4-local/.scripts/flux-operator.yaml b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/flux-operator.yaml similarity index 99% rename from tests/lammps/data/k8s-size-4-local/.scripts/flux-operator.yaml rename to tests/lammps/data/minikube/k8s-size-4-local/.scripts/flux-operator.yaml index b4bc03e..38863d1 100644 --- a/tests/lammps/data/k8s-size-4-local/.scripts/flux-operator.yaml +++ b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/flux-operator.yaml @@ -202,6 +202,10 @@ spec: description: Logging modes determine the output you see in the job log properties: + debug: + default: false + description: Debug mode adds extra verbosity to Flux + type: boolean quiet: default: false description: Quiet mode silences all output so the job only shows diff --git a/tests/lammps/data/minikube/k8s-size-4-local/.scripts/kubectl-version.yaml b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/kubectl-version.yaml new file mode 100644 index 0000000..8216c96 --- /dev/null +++ b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/kubectl-version.yaml @@ 
-0,0 +1,22 @@ +clientVersion: + buildDate: "2023-01-18T15:58:16Z" + compiler: gc + gitCommit: 8f94681cd294aa8cfd3407b8191f6c70214973a4 + gitTreeState: clean + gitVersion: v1.26.1 + goVersion: go1.19.5 + major: "1" + minor: "26" + platform: linux/amd64 +kustomizeVersion: v4.5.7 +serverVersion: + buildDate: "2022-07-13T14:23:26Z" + compiler: gc + gitCommit: aef86a93758dc3cb2c658dd9657ab4ad4afc21cb + gitTreeState: clean + gitVersion: v1.24.3 + goVersion: go1.18.3 + major: "1" + minor: "24" + platform: linux/amd64 + diff --git a/tests/lammps/data/k8s-size-4-local/.scripts/minicluster-run-lmp-size-2-minicluster-size-2.sh b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/minicluster-run-lmp-size-2-minicluster-size-2.sh similarity index 87% rename from tests/lammps/data/k8s-size-4-local/.scripts/minicluster-run-lmp-size-2-minicluster-size-2.sh rename to tests/lammps/data/minikube/k8s-size-4-local/.scripts/minicluster-run-lmp-size-2-minicluster-size-2.sh index 9622222..4935321 100755 --- a/tests/lammps/data/k8s-size-4-local/.scripts/minicluster-run-lmp-size-2-minicluster-size-2.sh +++ b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/minicluster-run-lmp-size-2-minicluster-size-2.sh @@ -54,6 +54,20 @@ function install_operator() { kubectl apply -f $tmpfile } +function save_common_metadata() { + # Save common versions across clouds for kubectl and the cluster nodes + SCRIPT_DIR="${1}" + SIZE="${2}" + + run_echo_save "${SCRIPT_DIR}/kubectl-version.yaml" kubectl version --output=yaml + + # Show nodes and save metadata to script directory + run_echo kubectl get nodes + run_echo_save "${SCRIPT_DIR}/nodes-size-${SIZE}.json" kubectl get nodes -o json + run_echo_save "${SCRIPT_DIR}/nodes-size-${SIZE}.txt" kubectl describe nodes +} + + function run_echo() { # Show the user the command then run it @@ -62,6 +76,14 @@ function run_echo() { retry $@ } +function run_echo_save() { + echo + save_to="${1}" + shift + print_green "$@ > ${save_to}" + $@ > ${save_to} +} + function 
run_echo_allow_fail() { echo print_green "$@" @@ -133,9 +155,9 @@ function with_exponential_backoff { } NAMESPACE="flux-operator" -CRD="/tmp/lammps-data-PeHJF2/k8s-size-4-local/.scripts/minicluster-size-2.yaml" +CRD="/home/vanessa/Desktop/Code/flux/flux-cloud/tests/lammps/data/minikube/k8s-size-4-local/.scripts/minicluster-size-2.yaml" JOB="lammps" -LOGFILE="/tmp/lammps-data-PeHJF2/k8s-size-4-local/lmp-size-2-minicluster-size-2/log.out" +LOGFILE="/home/vanessa/Desktop/Code/flux/flux-cloud/tests/lammps/data/minikube/k8s-size-4-local/lmp-size-2-minicluster-size-2/log.out" print_magenta " apply : ${CRD}" print_magenta " job : ${JOB}" diff --git a/tests/lammps/data/k8s-size-4-local/.scripts/minicluster-run-lmp-size-4-minicluster-size-4.sh b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/minicluster-run-lmp-size-4-minicluster-size-4.sh similarity index 87% rename from tests/lammps/data/k8s-size-4-local/.scripts/minicluster-run-lmp-size-4-minicluster-size-4.sh rename to tests/lammps/data/minikube/k8s-size-4-local/.scripts/minicluster-run-lmp-size-4-minicluster-size-4.sh index 51d7299..78bd907 100755 --- a/tests/lammps/data/k8s-size-4-local/.scripts/minicluster-run-lmp-size-4-minicluster-size-4.sh +++ b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/minicluster-run-lmp-size-4-minicluster-size-4.sh @@ -54,6 +54,20 @@ function install_operator() { kubectl apply -f $tmpfile } +function save_common_metadata() { + # Save common versions across clouds for kubectl and the cluster nodes + SCRIPT_DIR="${1}" + SIZE="${2}" + + run_echo_save "${SCRIPT_DIR}/kubectl-version.yaml" kubectl version --output=yaml + + # Show nodes and save metadata to script directory + run_echo kubectl get nodes + run_echo_save "${SCRIPT_DIR}/nodes-size-${SIZE}.json" kubectl get nodes -o json + run_echo_save "${SCRIPT_DIR}/nodes-size-${SIZE}.txt" kubectl describe nodes +} + + function run_echo() { # Show the user the command then run it @@ -62,6 +76,14 @@ function run_echo() { retry $@ } 
+function run_echo_save() { + echo + save_to="${1}" + shift + print_green "$@ > ${save_to}" + $@ > ${save_to} +} + function run_echo_allow_fail() { echo print_green "$@" @@ -133,9 +155,9 @@ function with_exponential_backoff { } NAMESPACE="flux-operator" -CRD="/tmp/lammps-data-PeHJF2/k8s-size-4-local/.scripts/minicluster-size-4.yaml" +CRD="/home/vanessa/Desktop/Code/flux/flux-cloud/tests/lammps/data/minikube/k8s-size-4-local/.scripts/minicluster-size-4.yaml" JOB="lammps" -LOGFILE="/tmp/lammps-data-PeHJF2/k8s-size-4-local/lmp-size-4-minicluster-size-4/log.out" +LOGFILE="/home/vanessa/Desktop/Code/flux/flux-cloud/tests/lammps/data/minikube/k8s-size-4-local/lmp-size-4-minicluster-size-4/log.out" print_magenta " apply : ${CRD}" print_magenta " job : ${JOB}" diff --git a/tests/lammps/data/k8s-size-4-local/.scripts/minicluster-size-2.yaml b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/minicluster-size-2.yaml similarity index 100% rename from tests/lammps/data/k8s-size-4-local/.scripts/minicluster-size-2.yaml rename to tests/lammps/data/minikube/k8s-size-4-local/.scripts/minicluster-size-2.yaml diff --git a/tests/lammps/data/k8s-size-4-local/.scripts/minicluster-size-4.yaml b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/minicluster-size-4.yaml similarity index 100% rename from tests/lammps/data/k8s-size-4-local/.scripts/minicluster-size-4.yaml rename to tests/lammps/data/minikube/k8s-size-4-local/.scripts/minicluster-size-4.yaml diff --git a/tests/lammps/data/minikube/k8s-size-4-local/.scripts/minikube-version.yaml b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/minikube-version.yaml new file mode 100644 index 0000000..db5c8b5 --- /dev/null +++ b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/minikube-version.yaml @@ -0,0 +1,14 @@ +buildctl: buildctl github.com/moby/buildkit v0.10.3 c8d25d9a103b70dc300a4fd55e7e576472284e31 +commit: 62e108c3dfdec8029a890ad6d8ef96b6461426dc +containerd: containerd containerd.io 1.6.6 
10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1 +cri-dockerd: "" +crictl: crictl version v1.21.0 +crio: crio version 1.24.1 +crun: crun version UNKNOWN +ctr: ctr containerd.io 1.6.6 +docker: Docker version 20.10.17, build 100c701 +dockerd: Docker version 20.10.17, build a89b842 +minikubeVersion: v1.26.1 +podman: podman version 3.4.2 +runc: runc version 1.1.2 + diff --git a/tests/lammps/data/minikube/k8s-size-4-local/.scripts/nodes-size-4.json b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/nodes-size-4.json new file mode 100644 index 0000000..c7a4fe6 --- /dev/null +++ b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/nodes-size-4.json @@ -0,0 +1,722 @@ +{ + "apiVersion": "v1", + "items": [ + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "kubeadm.alpha.kubernetes.io/cri-socket": "unix:///var/run/cri-dockerd.sock", + "node.alpha.kubernetes.io/ttl": "0", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2023-01-29T00:02:18Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "minikube", + "kubernetes.io/os": "linux", + "minikube.k8s.io/commit": "62e108c3dfdec8029a890ad6d8ef96b6461426dc", + "minikube.k8s.io/name": "minikube", + "minikube.k8s.io/primary": "true", + "minikube.k8s.io/updated_at": "2023_01_28T17_02_22_0700", + "minikube.k8s.io/version": "v1.26.1", + "node-role.kubernetes.io/control-plane": "", + "node.kubernetes.io/exclude-from-external-load-balancers": "" + }, + "name": "minikube", + "resourceVersion": "425", + "uid": "8e77ca97-2322-4a6b-aa85-03018da93f19" + }, + "spec": { + "podCIDR": "10.244.0.0/24", + "podCIDRs": [ + "10.244.0.0/24" + ] + }, + "status": { + "addresses": [ + { + "address": "192.168.49.2", + "type": "InternalIP" + }, + { + "address": "minikube", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "8", + "ephemeral-storage": "490617784Ki", + 
"hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "16063412Ki", + "pods": "110" + }, + "capacity": { + "cpu": "8", + "ephemeral-storage": "490617784Ki", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "16063412Ki", + "pods": "110" + }, + "conditions": [ + { + "lastHeartbeatTime": "2023-01-29T00:02:53Z", + "lastTransitionTime": "2023-01-29T00:02:16Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2023-01-29T00:02:53Z", + "lastTransitionTime": "2023-01-29T00:02:16Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2023-01-29T00:02:53Z", + "lastTransitionTime": "2023-01-29T00:02:16Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2023-01-29T00:02:53Z", + "lastTransitionTime": "2023-01-29T00:02:53Z", + "message": "kubelet is posting ready status", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [ + { + "names": [ + "k8s.gcr.io/etcd@sha256:13f53ed1d91e2e11aac476ee9a0269fdda6cc4874eba903efd40daf50c55eee5", + "k8s.gcr.io/etcd:3.5.3-0" + ], + "sizeBytes": 299495233 + }, + { + "names": [ + "k8s.gcr.io/kube-apiserver@sha256:a04609b85962da7e6531d32b75f652b4fb9f5fe0b0ee0aa160856faad8ec5d96", + "k8s.gcr.io/kube-apiserver:v1.24.3" + ], + "sizeBytes": 129710737 + }, + { + "names": [ + "k8s.gcr.io/kube-controller-manager@sha256:f504eead8b8674ebc9067370ef51abbdc531b4a81813bfe464abccb8c76b6a53", + "k8s.gcr.io/kube-controller-manager:v1.24.3" + ], + "sizeBytes": 119360464 + }, + { + "names": [ + "k8s.gcr.io/kube-proxy@sha256:c1b135231b5b1a6799346cd701da4b59e5b7ef8e694ec7b04fb23b8dbe144137", 
+ "k8s.gcr.io/kube-proxy:v1.24.3" + ], + "sizeBytes": 109939784 + }, + { + "names": [ + "kindest/kindnetd@sha256:e2d4d675dcf28a90102ad5219b75c5a0ee096c4321247dfae31dd1467611a9fb", + "kindest/kindnetd:v20220726-ed811e41" + ], + "sizeBytes": 61761170 + }, + { + "names": [ + "k8s.gcr.io/kube-scheduler@sha256:e199523298224cd9f2a9a43c7c2c37fa57aff87648ed1e1de9984eba6f6005f0", + "k8s.gcr.io/kube-scheduler:v1.24.3" + ], + "sizeBytes": 50989989 + }, + { + "names": [ + "k8s.gcr.io/coredns/coredns@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e", + "k8s.gcr.io/coredns/coredns:v1.8.6" + ], + "sizeBytes": 46829283 + }, + { + "names": [ + "gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944", + "gcr.io/k8s-minikube/storage-provisioner:v5" + ], + "sizeBytes": 31465472 + }, + { + "names": [ + "k8s.gcr.io/pause@sha256:bb6ed397957e9ca7c65ada0db5c5d1c707c9c8afc80a94acbe69f3ae76988f0c", + "k8s.gcr.io/pause:3.7" + ], + "sizeBytes": 711184 + }, + { + "names": [ + "k8s.gcr.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db", + "k8s.gcr.io/pause:3.6" + ], + "sizeBytes": 682696 + } + ], + "nodeInfo": { + "architecture": "amd64", + "bootID": "e79e066c-0c73-47a0-af4a-7da769e1e766", + "containerRuntimeVersion": "docker://20.10.17", + "kernelVersion": "5.15.0-58-generic", + "kubeProxyVersion": "v1.24.3", + "kubeletVersion": "v1.24.3", + "machineID": "4c192b04687c403f8fbb9bc7975b21b3", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.4 LTS", + "systemUUID": "661ce3ad-558f-461a-b2a7-c5ee87baf253" + } + } + }, + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/cri-dockerd.sock", + "node.alpha.kubernetes.io/ttl": "0", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2023-01-29T00:02:40Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + 
"beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "minikube-m02", + "kubernetes.io/os": "linux" + }, + "name": "minikube-m02", + "resourceVersion": "512", + "uid": "8fe47979-9f4d-4d0f-a209-e272353b471c" + }, + "spec": { + "podCIDR": "10.244.1.0/24", + "podCIDRs": [ + "10.244.1.0/24" + ] + }, + "status": { + "addresses": [ + { + "address": "192.168.49.3", + "type": "InternalIP" + }, + { + "address": "minikube-m02", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "8", + "ephemeral-storage": "490617784Ki", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "16063412Ki", + "pods": "110" + }, + "capacity": { + "cpu": "8", + "ephemeral-storage": "490617784Ki", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "16063412Ki", + "pods": "110" + }, + "conditions": [ + { + "lastHeartbeatTime": "2023-01-29T00:03:11Z", + "lastTransitionTime": "2023-01-29T00:02:40Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2023-01-29T00:03:11Z", + "lastTransitionTime": "2023-01-29T00:02:40Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2023-01-29T00:03:11Z", + "lastTransitionTime": "2023-01-29T00:02:40Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2023-01-29T00:03:11Z", + "lastTransitionTime": "2023-01-29T00:03:01Z", + "message": "kubelet is posting ready status", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [ + { + "names": [ + "k8s.gcr.io/etcd@sha256:13f53ed1d91e2e11aac476ee9a0269fdda6cc4874eba903efd40daf50c55eee5", + 
"k8s.gcr.io/etcd:3.5.3-0" + ], + "sizeBytes": 299495233 + }, + { + "names": [ + "k8s.gcr.io/kube-apiserver@sha256:a04609b85962da7e6531d32b75f652b4fb9f5fe0b0ee0aa160856faad8ec5d96", + "k8s.gcr.io/kube-apiserver:v1.24.3" + ], + "sizeBytes": 129710737 + }, + { + "names": [ + "k8s.gcr.io/kube-controller-manager@sha256:f504eead8b8674ebc9067370ef51abbdc531b4a81813bfe464abccb8c76b6a53", + "k8s.gcr.io/kube-controller-manager:v1.24.3" + ], + "sizeBytes": 119360464 + }, + { + "names": [ + "k8s.gcr.io/kube-proxy@sha256:c1b135231b5b1a6799346cd701da4b59e5b7ef8e694ec7b04fb23b8dbe144137", + "k8s.gcr.io/kube-proxy:v1.24.3" + ], + "sizeBytes": 109939784 + }, + { + "names": [ + "kindest/kindnetd@sha256:e2d4d675dcf28a90102ad5219b75c5a0ee096c4321247dfae31dd1467611a9fb", + "kindest/kindnetd:v20220726-ed811e41" + ], + "sizeBytes": 61761170 + }, + { + "names": [ + "k8s.gcr.io/kube-scheduler@sha256:e199523298224cd9f2a9a43c7c2c37fa57aff87648ed1e1de9984eba6f6005f0", + "k8s.gcr.io/kube-scheduler:v1.24.3" + ], + "sizeBytes": 50989989 + }, + { + "names": [ + "k8s.gcr.io/coredns/coredns@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e", + "k8s.gcr.io/coredns/coredns:v1.8.6" + ], + "sizeBytes": 46829283 + }, + { + "names": [ + "gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944", + "gcr.io/k8s-minikube/storage-provisioner:v5" + ], + "sizeBytes": 31465472 + }, + { + "names": [ + "k8s.gcr.io/pause@sha256:bb6ed397957e9ca7c65ada0db5c5d1c707c9c8afc80a94acbe69f3ae76988f0c", + "k8s.gcr.io/pause:3.7" + ], + "sizeBytes": 711184 + }, + { + "names": [ + "k8s.gcr.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db", + "k8s.gcr.io/pause:3.6" + ], + "sizeBytes": 682696 + } + ], + "nodeInfo": { + "architecture": "amd64", + "bootID": "e79e066c-0c73-47a0-af4a-7da769e1e766", + "containerRuntimeVersion": "docker://20.10.17", + "kernelVersion": "5.15.0-58-generic", + "kubeProxyVersion": "v1.24.3", + 
"kubeletVersion": "v1.24.3", + "machineID": "4c192b04687c403f8fbb9bc7975b21b3", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.4 LTS", + "systemUUID": "31678799-a4fb-451d-a8bc-cbae630f4702" + } + } + }, + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/cri-dockerd.sock", + "node.alpha.kubernetes.io/ttl": "0", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2023-01-29T00:02:56Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "minikube-m03", + "kubernetes.io/os": "linux" + }, + "name": "minikube-m03", + "resourceVersion": "556", + "uid": "2a2cbeb4-d73a-41e0-83ce-7c4fa596fc19" + }, + "spec": { + "podCIDR": "10.244.2.0/24", + "podCIDRs": [ + "10.244.2.0/24" + ], + "taints": [ + { + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "timeAdded": "2023-01-29T00:03:05Z" + } + ] + }, + "status": { + "addresses": [ + { + "address": "192.168.49.4", + "type": "InternalIP" + }, + { + "address": "minikube-m03", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "8", + "ephemeral-storage": "490617784Ki", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "16063412Ki", + "pods": "110" + }, + "capacity": { + "cpu": "8", + "ephemeral-storage": "490617784Ki", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "16063412Ki", + "pods": "110" + }, + "conditions": [ + { + "lastHeartbeatTime": "2023-01-29T00:03:16Z", + "lastTransitionTime": "2023-01-29T00:02:56Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2023-01-29T00:03:16Z", + "lastTransitionTime": "2023-01-29T00:02:56Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": 
"False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2023-01-29T00:03:16Z", + "lastTransitionTime": "2023-01-29T00:02:56Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2023-01-29T00:03:16Z", + "lastTransitionTime": "2023-01-29T00:03:16Z", + "message": "kubelet is posting ready status", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [ + { + "names": [ + "k8s.gcr.io/etcd@sha256:13f53ed1d91e2e11aac476ee9a0269fdda6cc4874eba903efd40daf50c55eee5", + "k8s.gcr.io/etcd:3.5.3-0" + ], + "sizeBytes": 299495233 + }, + { + "names": [ + "k8s.gcr.io/kube-apiserver@sha256:a04609b85962da7e6531d32b75f652b4fb9f5fe0b0ee0aa160856faad8ec5d96", + "k8s.gcr.io/kube-apiserver:v1.24.3" + ], + "sizeBytes": 129710737 + }, + { + "names": [ + "k8s.gcr.io/kube-controller-manager@sha256:f504eead8b8674ebc9067370ef51abbdc531b4a81813bfe464abccb8c76b6a53", + "k8s.gcr.io/kube-controller-manager:v1.24.3" + ], + "sizeBytes": 119360464 + }, + { + "names": [ + "k8s.gcr.io/kube-proxy@sha256:c1b135231b5b1a6799346cd701da4b59e5b7ef8e694ec7b04fb23b8dbe144137", + "k8s.gcr.io/kube-proxy:v1.24.3" + ], + "sizeBytes": 109939784 + }, + { + "names": [ + "k8s.gcr.io/kube-scheduler@sha256:e199523298224cd9f2a9a43c7c2c37fa57aff87648ed1e1de9984eba6f6005f0", + "k8s.gcr.io/kube-scheduler:v1.24.3" + ], + "sizeBytes": 50989989 + }, + { + "names": [ + "k8s.gcr.io/coredns/coredns@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e", + "k8s.gcr.io/coredns/coredns:v1.8.6" + ], + "sizeBytes": 46829283 + }, + { + "names": [ + "gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944", + "gcr.io/k8s-minikube/storage-provisioner:v5" + ], + "sizeBytes": 31465472 + }, + { + "names": [ + 
"k8s.gcr.io/pause@sha256:bb6ed397957e9ca7c65ada0db5c5d1c707c9c8afc80a94acbe69f3ae76988f0c", + "k8s.gcr.io/pause:3.7" + ], + "sizeBytes": 711184 + } + ], + "nodeInfo": { + "architecture": "amd64", + "bootID": "e79e066c-0c73-47a0-af4a-7da769e1e766", + "containerRuntimeVersion": "docker://20.10.17", + "kernelVersion": "5.15.0-58-generic", + "kubeProxyVersion": "v1.24.3", + "kubeletVersion": "v1.24.3", + "machineID": "4c192b04687c403f8fbb9bc7975b21b3", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.4 LTS", + "systemUUID": "b50b1c2b-6377-43f3-b382-d01cb0648758" + } + } + }, + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/cri-dockerd.sock", + "node.alpha.kubernetes.io/ttl": "0", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2023-01-29T00:03:13Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "minikube-m04", + "kubernetes.io/os": "linux" + }, + "name": "minikube-m04", + "resourceVersion": "559", + "uid": "66ff8a5e-2f92-4a94-bbaf-627ed204c2ca" + }, + "spec": { + "podCIDR": "10.244.3.0/24", + "podCIDRs": [ + "10.244.3.0/24" + ], + "taints": [ + { + "effect": "NoSchedule", + "key": "node.kubernetes.io/not-ready" + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "timeAdded": "2023-01-29T00:03:15Z" + } + ] + }, + "status": { + "addresses": [ + { + "address": "192.168.49.5", + "type": "InternalIP" + }, + { + "address": "minikube-m04", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "8", + "ephemeral-storage": "490617784Ki", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "16063412Ki", + "pods": "110" + }, + "capacity": { + "cpu": "8", + "ephemeral-storage": "490617784Ki", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "16063412Ki", + "pods": "110" + }, + "conditions": [ + { 
+ "lastHeartbeatTime": "2023-01-29T00:03:13Z", + "lastTransitionTime": "2023-01-29T00:03:13Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2023-01-29T00:03:13Z", + "lastTransitionTime": "2023-01-29T00:03:13Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2023-01-29T00:03:13Z", + "lastTransitionTime": "2023-01-29T00:03:13Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2023-01-29T00:03:13Z", + "lastTransitionTime": "2023-01-29T00:03:13Z", + "message": "[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized, CSINode is not yet initialized]", + "reason": "KubeletNotReady", + "status": "False", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [ + { + "names": [ + "k8s.gcr.io/etcd@sha256:13f53ed1d91e2e11aac476ee9a0269fdda6cc4874eba903efd40daf50c55eee5", + "k8s.gcr.io/etcd:3.5.3-0" + ], + "sizeBytes": 299495233 + }, + { + "names": [ + "k8s.gcr.io/kube-apiserver@sha256:a04609b85962da7e6531d32b75f652b4fb9f5fe0b0ee0aa160856faad8ec5d96", + "k8s.gcr.io/kube-apiserver:v1.24.3" + ], + "sizeBytes": 129710737 + }, + { + "names": [ + "k8s.gcr.io/kube-controller-manager@sha256:f504eead8b8674ebc9067370ef51abbdc531b4a81813bfe464abccb8c76b6a53", + "k8s.gcr.io/kube-controller-manager:v1.24.3" + ], + "sizeBytes": 119360464 + }, + { + "names": [ + "k8s.gcr.io/kube-proxy@sha256:c1b135231b5b1a6799346cd701da4b59e5b7ef8e694ec7b04fb23b8dbe144137", + "k8s.gcr.io/kube-proxy:v1.24.3" + ], + "sizeBytes": 109939784 + }, + { + "names": [ + 
"k8s.gcr.io/kube-scheduler@sha256:e199523298224cd9f2a9a43c7c2c37fa57aff87648ed1e1de9984eba6f6005f0", + "k8s.gcr.io/kube-scheduler:v1.24.3" + ], + "sizeBytes": 50989989 + }, + { + "names": [ + "k8s.gcr.io/coredns/coredns@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e", + "k8s.gcr.io/coredns/coredns:v1.8.6" + ], + "sizeBytes": 46829283 + }, + { + "names": [ + "gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944", + "gcr.io/k8s-minikube/storage-provisioner:v5" + ], + "sizeBytes": 31465472 + }, + { + "names": [ + "k8s.gcr.io/pause@sha256:bb6ed397957e9ca7c65ada0db5c5d1c707c9c8afc80a94acbe69f3ae76988f0c", + "k8s.gcr.io/pause:3.7" + ], + "sizeBytes": 711184 + } + ], + "nodeInfo": { + "architecture": "amd64", + "bootID": "e79e066c-0c73-47a0-af4a-7da769e1e766", + "containerRuntimeVersion": "docker://20.10.17", + "kernelVersion": "5.15.0-58-generic", + "kubeProxyVersion": "v1.24.3", + "kubeletVersion": "v1.24.3", + "machineID": "4c192b04687c403f8fbb9bc7975b21b3", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.4 LTS", + "systemUUID": "e8f8b5e9-4607-4428-a9c9-751e14ebd529" + } + } + } + ], + "kind": "List", + "metadata": { + "resourceVersion": "" + } +} diff --git a/tests/lammps/data/minikube/k8s-size-4-local/.scripts/nodes-size-4.txt b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/nodes-size-4.txt new file mode 100644 index 0000000..e8a3d97 --- /dev/null +++ b/tests/lammps/data/minikube/k8s-size-4-local/.scripts/nodes-size-4.txt @@ -0,0 +1,336 @@ +Name: minikube +Roles: control-plane +Labels: beta.kubernetes.io/arch=amd64 + beta.kubernetes.io/os=linux + kubernetes.io/arch=amd64 + kubernetes.io/hostname=minikube + kubernetes.io/os=linux + minikube.k8s.io/commit=62e108c3dfdec8029a890ad6d8ef96b6461426dc + minikube.k8s.io/name=minikube + minikube.k8s.io/primary=true + minikube.k8s.io/updated_at=2023_01_28T17_02_22_0700 + minikube.k8s.io/version=v1.26.1 + 
node-role.kubernetes.io/control-plane= + node.kubernetes.io/exclude-from-external-load-balancers= +Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock + node.alpha.kubernetes.io/ttl: 0 + volumes.kubernetes.io/controller-managed-attach-detach: true +CreationTimestamp: Sat, 28 Jan 2023 17:02:18 -0700 +Taints: +Unschedulable: false +Lease: + HolderIdentity: minikube + AcquireTime: + RenewTime: Sat, 28 Jan 2023 17:03:13 -0700 +Conditions: + Type Status LastHeartbeatTime LastTransitionTime Reason Message + ---- ------ ----------------- ------------------ ------ ------- + MemoryPressure False Sat, 28 Jan 2023 17:02:53 -0700 Sat, 28 Jan 2023 17:02:16 -0700 KubeletHasSufficientMemory kubelet has sufficient memory available + DiskPressure False Sat, 28 Jan 2023 17:02:53 -0700 Sat, 28 Jan 2023 17:02:16 -0700 KubeletHasNoDiskPressure kubelet has no disk pressure + PIDPressure False Sat, 28 Jan 2023 17:02:53 -0700 Sat, 28 Jan 2023 17:02:16 -0700 KubeletHasSufficientPID kubelet has sufficient PID available + Ready True Sat, 28 Jan 2023 17:02:53 -0700 Sat, 28 Jan 2023 17:02:53 -0700 KubeletReady kubelet is posting ready status +Addresses: + InternalIP: 192.168.49.2 + Hostname: minikube +Capacity: + cpu: 8 + ephemeral-storage: 490617784Ki + hugepages-1Gi: 0 + hugepages-2Mi: 0 + memory: 16063412Ki + pods: 110 +Allocatable: + cpu: 8 + ephemeral-storage: 490617784Ki + hugepages-1Gi: 0 + hugepages-2Mi: 0 + memory: 16063412Ki + pods: 110 +System Info: + Machine ID: 4c192b04687c403f8fbb9bc7975b21b3 + System UUID: 661ce3ad-558f-461a-b2a7-c5ee87baf253 + Boot ID: e79e066c-0c73-47a0-af4a-7da769e1e766 + Kernel Version: 5.15.0-58-generic + OS Image: Ubuntu 20.04.4 LTS + Operating System: linux + Architecture: amd64 + Container Runtime Version: docker://20.10.17 + Kubelet Version: v1.24.3 + Kube-Proxy Version: v1.24.3 +PodCIDR: 10.244.0.0/24 +PodCIDRs: 10.244.0.0/24 +Non-terminated Pods: (8 in total) + Namespace Name CPU Requests CPU Limits Memory Requests 
Memory Limits Age + --------- ---- ------------ ---------- --------------- ------------- --- + kube-system coredns-6d4b75cb6d-kvxdc 100m (1%) 0 (0%) 70Mi (0%) 170Mi (1%) 44s + kube-system etcd-minikube 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 58s + kube-system kindnet-slln4 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 44s + kube-system kube-apiserver-minikube 250m (3%) 0 (0%) 0 (0%) 0 (0%) 58s + kube-system kube-controller-manager-minikube 200m (2%) 0 (0%) 0 (0%) 0 (0%) 58s + kube-system kube-proxy-4zfhq 0 (0%) 0 (0%) 0 (0%) 0 (0%) 44s + kube-system kube-scheduler-minikube 100m (1%) 0 (0%) 0 (0%) 0 (0%) 58s + kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 57s +Allocated resources: + (Total limits may be over 100 percent, i.e., overcommitted.) + Resource Requests Limits + -------- -------- ------ + cpu 850m (10%) 100m (1%) + memory 220Mi (1%) 220Mi (1%) + ephemeral-storage 0 (0%) 0 (0%) + hugepages-1Gi 0 (0%) 0 (0%) + hugepages-2Mi 0 (0%) 0 (0%) +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 42s kube-proxy + Normal NodeHasSufficientMemory 67s (x5 over 67s) kubelet Node minikube status is now: NodeHasSufficientMemory + Normal NodeHasNoDiskPressure 67s (x5 over 67s) kubelet Node minikube status is now: NodeHasNoDiskPressure + Normal NodeHasSufficientPID 67s (x5 over 67s) kubelet Node minikube status is now: NodeHasSufficientPID + Normal Starting 58s kubelet Starting kubelet. 
+ Normal NodeAllocatableEnforced 58s kubelet Updated Node Allocatable limit across pods + Normal NodeHasSufficientMemory 58s kubelet Node minikube status is now: NodeHasSufficientMemory + Normal NodeHasNoDiskPressure 58s kubelet Node minikube status is now: NodeHasNoDiskPressure + Normal NodeHasSufficientPID 58s kubelet Node minikube status is now: NodeHasSufficientPID + Normal RegisteredNode 45s node-controller Node minikube event: Registered Node minikube in Controller + Normal NodeReady 27s kubelet Node minikube status is now: NodeReady + + +Name: minikube-m02 +Roles: +Labels: beta.kubernetes.io/arch=amd64 + beta.kubernetes.io/os=linux + kubernetes.io/arch=amd64 + kubernetes.io/hostname=minikube-m02 + kubernetes.io/os=linux +Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/cri-dockerd.sock + node.alpha.kubernetes.io/ttl: 0 + volumes.kubernetes.io/controller-managed-attach-detach: true +CreationTimestamp: Sat, 28 Jan 2023 17:02:40 -0700 +Taints: +Unschedulable: false +Lease: + HolderIdentity: minikube-m02 + AcquireTime: + RenewTime: Sat, 28 Jan 2023 17:03:11 -0700 +Conditions: + Type Status LastHeartbeatTime LastTransitionTime Reason Message + ---- ------ ----------------- ------------------ ------ ------- + MemoryPressure False Sat, 28 Jan 2023 17:03:11 -0700 Sat, 28 Jan 2023 17:02:40 -0700 KubeletHasSufficientMemory kubelet has sufficient memory available + DiskPressure False Sat, 28 Jan 2023 17:03:11 -0700 Sat, 28 Jan 2023 17:02:40 -0700 KubeletHasNoDiskPressure kubelet has no disk pressure + PIDPressure False Sat, 28 Jan 2023 17:03:11 -0700 Sat, 28 Jan 2023 17:02:40 -0700 KubeletHasSufficientPID kubelet has sufficient PID available + Ready True Sat, 28 Jan 2023 17:03:11 -0700 Sat, 28 Jan 2023 17:03:01 -0700 KubeletReady kubelet is posting ready status +Addresses: + InternalIP: 192.168.49.3 + Hostname: minikube-m02 +Capacity: + cpu: 8 + ephemeral-storage: 490617784Ki + hugepages-1Gi: 0 + hugepages-2Mi: 0 + memory: 16063412Ki + pods: 110 
+Allocatable: + cpu: 8 + ephemeral-storage: 490617784Ki + hugepages-1Gi: 0 + hugepages-2Mi: 0 + memory: 16063412Ki + pods: 110 +System Info: + Machine ID: 4c192b04687c403f8fbb9bc7975b21b3 + System UUID: 31678799-a4fb-451d-a8bc-cbae630f4702 + Boot ID: e79e066c-0c73-47a0-af4a-7da769e1e766 + Kernel Version: 5.15.0-58-generic + OS Image: Ubuntu 20.04.4 LTS + Operating System: linux + Architecture: amd64 + Container Runtime Version: docker://20.10.17 + Kubelet Version: v1.24.3 + Kube-Proxy Version: v1.24.3 +PodCIDR: 10.244.1.0/24 +PodCIDRs: 10.244.1.0/24 +Non-terminated Pods: (3 in total) + Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age + --------- ---- ------------ ---------- --------------- ------------- --- + kube-system kindnet-449kr 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 40s + kube-system kube-proxy-hn4dx 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40s + operator-system operator-controller-manager-65d89d4ffb-2rkbd 15m (0%) 1 (12%) 128Mi (0%) 256Mi (1%) 1s +Allocated resources: + (Total limits may be over 100 percent, i.e., overcommitted.) + Resource Requests Limits + -------- -------- ------ + cpu 115m (1%) 1100m (13%) + memory 178Mi (1%) 306Mi (1%) + ephemeral-storage 0 (0%) 0 (0%) + hugepages-1Gi 0 (0%) 0 (0%) + hugepages-2Mi 0 (0%) 0 (0%) +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 35s kube-proxy + Normal Starting 40s kubelet Starting kubelet. 
+ Normal NodeHasSufficientMemory 40s (x2 over 40s) kubelet Node minikube-m02 status is now: NodeHasSufficientMemory + Normal NodeHasNoDiskPressure 40s (x2 over 40s) kubelet Node minikube-m02 status is now: NodeHasNoDiskPressure + Normal NodeHasSufficientPID 40s (x2 over 40s) kubelet Node minikube-m02 status is now: NodeHasSufficientPID + Normal NodeAllocatableEnforced 40s kubelet Updated Node Allocatable limit across pods + Normal RegisteredNode 35s node-controller Node minikube-m02 event: Registered Node minikube-m02 in Controller + Normal NodeReady 19s kubelet Node minikube-m02 status is now: NodeReady + + +Name: minikube-m03 +Roles: +Labels: beta.kubernetes.io/arch=amd64 + beta.kubernetes.io/os=linux + kubernetes.io/arch=amd64 + kubernetes.io/hostname=minikube-m03 + kubernetes.io/os=linux +Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/cri-dockerd.sock + node.alpha.kubernetes.io/ttl: 0 + volumes.kubernetes.io/controller-managed-attach-detach: true +CreationTimestamp: Sat, 28 Jan 2023 17:02:56 -0700 +Taints: node.kubernetes.io/not-ready:NoExecute +Unschedulable: false +Lease: + HolderIdentity: minikube-m03 + AcquireTime: + RenewTime: Sat, 28 Jan 2023 17:03:16 -0700 +Conditions: + Type Status LastHeartbeatTime LastTransitionTime Reason Message + ---- ------ ----------------- ------------------ ------ ------- + MemoryPressure False Sat, 28 Jan 2023 17:03:16 -0700 Sat, 28 Jan 2023 17:02:56 -0700 KubeletHasSufficientMemory kubelet has sufficient memory available + DiskPressure False Sat, 28 Jan 2023 17:03:16 -0700 Sat, 28 Jan 2023 17:02:56 -0700 KubeletHasNoDiskPressure kubelet has no disk pressure + PIDPressure False Sat, 28 Jan 2023 17:03:16 -0700 Sat, 28 Jan 2023 17:02:56 -0700 KubeletHasSufficientPID kubelet has sufficient PID available + Ready True Sat, 28 Jan 2023 17:03:16 -0700 Sat, 28 Jan 2023 17:03:16 -0700 KubeletReady kubelet is posting ready status +Addresses: + InternalIP: 192.168.49.4 + Hostname: minikube-m03 +Capacity: + cpu: 8 + 
ephemeral-storage: 490617784Ki + hugepages-1Gi: 0 + hugepages-2Mi: 0 + memory: 16063412Ki + pods: 110 +Allocatable: + cpu: 8 + ephemeral-storage: 490617784Ki + hugepages-1Gi: 0 + hugepages-2Mi: 0 + memory: 16063412Ki + pods: 110 +System Info: + Machine ID: 4c192b04687c403f8fbb9bc7975b21b3 + System UUID: b50b1c2b-6377-43f3-b382-d01cb0648758 + Boot ID: e79e066c-0c73-47a0-af4a-7da769e1e766 + Kernel Version: 5.15.0-58-generic + OS Image: Ubuntu 20.04.4 LTS + Operating System: linux + Architecture: amd64 + Container Runtime Version: docker://20.10.17 + Kubelet Version: v1.24.3 + Kube-Proxy Version: v1.24.3 +PodCIDR: 10.244.2.0/24 +PodCIDRs: 10.244.2.0/24 +Non-terminated Pods: (2 in total) + Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age + --------- ---- ------------ ---------- --------------- ------------- --- + kube-system kindnet-j5pdw 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 24s + kube-system kube-proxy-czrgj 0 (0%) 0 (0%) 0 (0%) 0 (0%) 24s +Allocated resources: + (Total limits may be over 100 percent, i.e., overcommitted.) + Resource Requests Limits + -------- -------- ------ + cpu 100m (1%) 100m (1%) + memory 50Mi (0%) 50Mi (0%) + ephemeral-storage 0 (0%) 0 (0%) + hugepages-1Gi 0 (0%) 0 (0%) + hugepages-2Mi 0 (0%) 0 (0%) +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 20s kube-proxy + Normal Starting 24s kubelet Starting kubelet. 
+ Normal NodeHasSufficientMemory 24s (x2 over 24s) kubelet Node minikube-m03 status is now: NodeHasSufficientMemory + Normal NodeHasNoDiskPressure 24s (x2 over 24s) kubelet Node minikube-m03 status is now: NodeHasNoDiskPressure + Normal NodeHasSufficientPID 24s (x2 over 24s) kubelet Node minikube-m03 status is now: NodeHasSufficientPID + Normal NodeAllocatableEnforced 24s kubelet Updated Node Allocatable limit across pods + Normal RegisteredNode 20s node-controller Node minikube-m03 event: Registered Node minikube-m03 in Controller + Normal NodeReady 4s kubelet Node minikube-m03 status is now: NodeReady + + +Name: minikube-m04 +Roles: +Labels: beta.kubernetes.io/arch=amd64 + beta.kubernetes.io/os=linux + kubernetes.io/arch=amd64 + kubernetes.io/hostname=minikube-m04 + kubernetes.io/os=linux +Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/cri-dockerd.sock + node.alpha.kubernetes.io/ttl: 0 + volumes.kubernetes.io/controller-managed-attach-detach: true +CreationTimestamp: Sat, 28 Jan 2023 17:03:13 -0700 +Taints: node.kubernetes.io/not-ready:NoExecute + node.kubernetes.io/not-ready:NoSchedule +Unschedulable: false +Lease: Failed to get lease: leases.coordination.k8s.io "minikube-m04" not found +Conditions: + Type Status LastHeartbeatTime LastTransitionTime Reason Message + ---- ------ ----------------- ------------------ ------ ------- + MemoryPressure False Sat, 28 Jan 2023 17:03:13 -0700 Sat, 28 Jan 2023 17:03:13 -0700 KubeletHasSufficientMemory kubelet has sufficient memory available + DiskPressure False Sat, 28 Jan 2023 17:03:13 -0700 Sat, 28 Jan 2023 17:03:13 -0700 KubeletHasNoDiskPressure kubelet has no disk pressure + PIDPressure False Sat, 28 Jan 2023 17:03:13 -0700 Sat, 28 Jan 2023 17:03:13 -0700 KubeletHasSufficientPID kubelet has sufficient PID available + Ready False Sat, 28 Jan 2023 17:03:13 -0700 Sat, 28 Jan 2023 17:03:13 -0700 KubeletNotReady [container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:docker: network plugin is not ready: cni config uninitialized, CSINode is not yet initialized] +Addresses: + InternalIP: 192.168.49.5 + Hostname: minikube-m04 +Capacity: + cpu: 8 + ephemeral-storage: 490617784Ki + hugepages-1Gi: 0 + hugepages-2Mi: 0 + memory: 16063412Ki + pods: 110 +Allocatable: + cpu: 8 + ephemeral-storage: 490617784Ki + hugepages-1Gi: 0 + hugepages-2Mi: 0 + memory: 16063412Ki + pods: 110 +System Info: + Machine ID: 4c192b04687c403f8fbb9bc7975b21b3 + System UUID: e8f8b5e9-4607-4428-a9c9-751e14ebd529 + Boot ID: e79e066c-0c73-47a0-af4a-7da769e1e766 + Kernel Version: 5.15.0-58-generic + OS Image: Ubuntu 20.04.4 LTS + Operating System: linux + Architecture: amd64 + Container Runtime Version: docker://20.10.17 + Kubelet Version: v1.24.3 + Kube-Proxy Version: v1.24.3 +PodCIDR: 10.244.3.0/24 +PodCIDRs: 10.244.3.0/24 +Non-terminated Pods: (2 in total) + Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age + --------- ---- ------------ ---------- --------------- ------------- --- + kube-system kindnet-rxd6l 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 7s + kube-system kube-proxy-rd8cl 0 (0%) 0 (0%) 0 (0%) 0 (0%) 7s +Allocated resources: + (Total limits may be over 100 percent, i.e., overcommitted.) + Resource Requests Limits + -------- -------- ------ + cpu 100m (1%) 100m (1%) + memory 50Mi (0%) 50Mi (0%) + ephemeral-storage 0 (0%) 0 (0%) + hugepages-1Gi 0 (0%) 0 (0%) + hugepages-2Mi 0 (0%) 0 (0%) +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 4s kube-proxy + Normal Starting 8s kubelet Starting kubelet. 
+ Normal NodeHasSufficientMemory 7s (x2 over 8s) kubelet Node minikube-m04 status is now: NodeHasSufficientMemory + Normal NodeHasNoDiskPressure 7s (x2 over 8s) kubelet Node minikube-m04 status is now: NodeHasNoDiskPressure + Normal NodeHasSufficientPID 7s (x2 over 8s) kubelet Node minikube-m04 status is now: NodeHasSufficientPID + Normal NodeAllocatableEnforced 7s kubelet Updated Node Allocatable limit across pods + Normal RegisteredNode 5s node-controller Node minikube-m04 event: Registered Node minikube-m04 in Controller diff --git a/tests/lammps/data/k8s-size-4-local/lmp-size-2-minicluster-size-2/log.out b/tests/lammps/data/minikube/k8s-size-4-local/lmp-size-2-minicluster-size-2/log.out similarity index 81% rename from tests/lammps/data/k8s-size-4-local/lmp-size-2-minicluster-size-2/log.out rename to tests/lammps/data/minikube/k8s-size-4-local/lmp-size-2-minicluster-size-2/log.out index 8a9dabe..1cd4381 100644 --- a/tests/lammps/data/k8s-size-4-local/lmp-size-2-minicluster-size-2/log.out +++ b/tests/lammps/data/minikube/k8s-size-4-local/lmp-size-2-minicluster-size-2/log.out @@ -8,7 +8,7 @@ Reading data file ... 304 atoms reading velocities ... 304 velocities - read_data CPU = 0.004 seconds + read_data CPU = 0.006 seconds Replicating atoms ... 
triclinic box = (0.0000000 0.0000000 0.0000000) to (44.652000 22.282400 27.557932) with tilt (0.0000000 -10.052060 0.0000000) 1 by 1 by 1 MPI processor grid @@ -51,20 +51,20 @@ Step Temp PotEng Press E_vdwl E_coul Volume 80 293.58677 -113.25831 5993.4151 -111.55946 -1.6988533 27418.867 90 300.62636 -113.27925 7202.8651 -111.58069 -1.6985591 27418.867 100 305.38276 -113.29357 10085.748 -111.59518 -1.6983875 27418.867 -Loop time of 19.799 on 1 procs for 100 steps with 2432 atoms +Loop time of 18.2655 on 1 procs for 100 steps with 2432 atoms -Performance: 0.044 ns/day, 549.973 hours/ns, 5.051 timesteps/s -99.9% CPU use with 1 MPI tasks x 1 OpenMP threads +Performance: 0.047 ns/day, 507.375 hours/ns, 5.475 timesteps/s +100.0% CPU use with 1 MPI tasks x 1 OpenMP threads MPI task timing breakdown: Section | min time | avg time | max time |%varavg| %total --------------------------------------------------------------- -Pair | 14.611 | 14.611 | 14.611 | 0.0 | 73.80 -Neigh | 0.39598 | 0.39598 | 0.39598 | 0.0 | 2.00 -Comm | 0.0061281 | 0.0061281 | 0.0061281 | 0.0 | 0.03 -Output | 0.00025692 | 0.00025692 | 0.00025692 | 0.0 | 0.00 -Modify | 4.7848 | 4.7848 | 4.7848 | 0.0 | 24.17 -Other | | 0.001147 | | | 0.01 +Pair | 13.517 | 13.517 | 13.517 | 0.0 | 74.00 +Neigh | 0.36365 | 0.36365 | 0.36365 | 0.0 | 1.99 +Comm | 0.0058376 | 0.0058376 | 0.0058376 | 0.0 | 0.03 +Output | 0.00022723 | 0.00022723 | 0.00022723 | 0.0 | 0.00 +Modify | 4.378 | 4.378 | 4.378 | 0.0 | 23.97 +Other | | 0.0009778 | | | 0.01 Nlocal: 2432.00 ave 2432 max 2432 min Histogram: 1 0 0 0 0 0 0 0 0 0 @@ -77,4 +77,4 @@ Total # of neighbors = 823958 Ave neighs/atom = 338.79852 Neighbor list builds = 5 Dangerous builds not checked -Total wall time: 0:00:20 +Total wall time: 0:00:18 diff --git a/tests/lammps/data/k8s-size-4-local/lmp-size-4-minicluster-size-4/log.out b/tests/lammps/data/minikube/k8s-size-4-local/lmp-size-4-minicluster-size-4/log.out similarity index 83% rename from 
tests/lammps/data/k8s-size-4-local/lmp-size-4-minicluster-size-4/log.out rename to tests/lammps/data/minikube/k8s-size-4-local/lmp-size-4-minicluster-size-4/log.out index b66bbcc..0e0dad7 100644 --- a/tests/lammps/data/k8s-size-4-local/lmp-size-4-minicluster-size-4/log.out +++ b/tests/lammps/data/minikube/k8s-size-4-local/lmp-size-4-minicluster-size-4/log.out @@ -8,7 +8,7 @@ Reading data file ... 304 atoms reading velocities ... 304 velocities - read_data CPU = 0.004 seconds + read_data CPU = 0.006 seconds Replicating atoms ... triclinic box = (0.0000000 0.0000000 0.0000000) to (44.652000 22.282400 27.557932) with tilt (0.0000000 -10.052060 0.0000000) 1 by 1 by 1 MPI processor grid @@ -51,20 +51,20 @@ Step Temp PotEng Press E_vdwl E_coul Volume 80 293.58677 -113.25831 5993.4151 -111.55946 -1.6988533 27418.867 90 300.62636 -113.27925 7202.8651 -111.58069 -1.6985591 27418.867 100 305.38276 -113.29357 10085.748 -111.59518 -1.6983875 27418.867 -Loop time of 18.2366 on 1 procs for 100 steps with 2432 atoms +Loop time of 18.2696 on 1 procs for 100 steps with 2432 atoms -Performance: 0.047 ns/day, 506.571 hours/ns, 5.483 timesteps/s +Performance: 0.047 ns/day, 507.489 hours/ns, 5.474 timesteps/s 99.9% CPU use with 1 MPI tasks x 1 OpenMP threads MPI task timing breakdown: Section | min time | avg time | max time |%varavg| %total --------------------------------------------------------------- -Pair | 13.498 | 13.498 | 13.498 | 0.0 | 74.02 -Neigh | 0.37384 | 0.37384 | 0.37384 | 0.0 | 2.05 -Comm | 0.0057354 | 0.0057354 | 0.0057354 | 0.0 | 0.03 -Output | 0.00022959 | 0.00022959 | 0.00022959 | 0.0 | 0.00 -Modify | 4.3577 | 4.3577 | 4.3577 | 0.0 | 23.90 -Other | | 0.001158 | | | 0.01 +Pair | 13.528 | 13.528 | 13.528 | 0.0 | 74.04 +Neigh | 0.37054 | 0.37054 | 0.37054 | 0.0 | 2.03 +Comm | 0.0057855 | 0.0057855 | 0.0057855 | 0.0 | 0.03 +Output | 0.00022601 | 0.00022601 | 0.00022601 | 0.0 | 0.00 +Modify | 4.3643 | 4.3643 | 4.3643 | 0.0 | 23.89 +Other | | 0.001206 | | | 0.01 Nlocal: 
2432.00 ave 2432 max 2432 min Histogram: 1 0 0 0 0 0 0 0 0 0 diff --git a/tests/lammps/data/k8s-size-4-local/meta.json b/tests/lammps/data/minikube/k8s-size-4-local/meta.json similarity index 75% rename from tests/lammps/data/k8s-size-4-local/meta.json rename to tests/lammps/data/minikube/k8s-size-4-local/meta.json index 0efdda2..ced5b66 100644 --- a/tests/lammps/data/k8s-size-4-local/meta.json +++ b/tests/lammps/data/minikube/k8s-size-4-local/meta.json @@ -1,9 +1,9 @@ { "times": { - "create-cluster": 101.871, - "minicluster-run-lmp-size-2-minicluster-size-2": 29.376, - "minicluster-run-lmp-size-4-minicluster-size-4": 156.728, - "destroy-cluster": 13.656 + "create-cluster": 100.363, + "minicluster-run-lmp-size-2-minicluster-size-2": 61.381, + "minicluster-run-lmp-size-4-minicluster-size-4": 64.028, + "destroy-cluster": 13.718 }, "size": 4, "minicluster": { diff --git a/tests/test.sh b/tests/test.sh index 0850656..ca485cb 100755 --- a/tests/test.sh +++ b/tests/test.sh @@ -45,6 +45,15 @@ fi # Check output for filename in $(find ./data -type f -print); do echo "Checking $filename"; + filebase=$(basename $filename) + + # Don't check these files, likely to change + if [[ "${filebase}" == "flux-operator.yaml" ]]; then + continue + fi + if [[ "${filebase}" == "nodes-size"* ]]; then + continue + fi suffix=$(echo ${filename:7}) outfile="$output/$suffix" if [[ ! -e "${outfile}" ]]; then